From: Felix Fietkau <nbd@openwrt.org>
Date: Sat, 16 Jun 2007 01:56:04 +0000 (+0000)
Subject: prepare for the transition to linux 2.6.22 - make it possible to override the kernel... 
X-Git-Tag: reboot~28921
X-Git-Url: http://git.cdn.openwrt.org/?a=commitdiff_plain;h=f019ac4dc9b89958657dd8c9d81bb2e270c938e7;p=openwrt%2Fstaging%2Fpepe2k.git

prepare for the transition to linux 2.6.22 - make it possible to override the kernel version in the target makefile

SVN-Revision: 7644
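
To illustrate the override this change enables, below is a minimal sketch of a target makefile, assuming a hypothetical "foo" board and a placeholder version string; only the variable names (LINUX_VERSION, KERNEL_PATCHVER, PATCH_DIR, LINUX_CONFIG, LINUX_SITE) come from the makefiles patched below, everything else is illustrative and not part of this commit.

# Hypothetical target/linux/foo-2.6/Makefile fragment -- a sketch, not part of this commit.
include $(TOPDIR)/rules.mk

BOARD:=foo
BOARDNAME:=Example board
# The override this commit makes possible: set the kernel version here,
# before kernel.mk is included (KERNEL_PATCHVER is expanded immediately).
LINUX_VERSION:=2.6.22-rc4

include $(INCLUDE_DIR)/kernel.mk
# kernel.mk now derives KERNEL_PATCHVER=2.6.22 (the "-rc4" suffix is cut off),
# so PATCH_DIR prefers ./patches-2.6.22 over ./patches when that directory
# exists, kernel-build.mk prefers ./config-2.6.22/default and the generic
# config-2.6.22 added below, and LINUX_SITE gains the /testing path component
# for -rc releases. Targets without version-suffixed directories keep using
# the plain ./patches and ./config directories as before.

include $(INCLUDE_DIR)/kernel-build.mk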
---

diff --git a/include/kernel-build.mk b/include/kernel-build.mk
index 71b9d8000f..83d0d5e6eb 100644
--- a/include/kernel-build.mk
+++ b/include/kernel-build.mk
@@ -11,9 +11,11 @@ include $(INCLUDE_DIR)/host.mk
 include $(INCLUDE_DIR)/kernel.mk
 include $(INCLUDE_DIR)/prereq.mk
 
-LINUX_CONFIG ?= ./config/default
+GENERIC_LINUX_CONFIG:=$(GENERIC_PLATFORM_DIR)/config-$(shell [ -f "$(GENERIC_PLATFORM_DIR)/config-$(KERNEL_PATCHVER)" ] && echo "$(KERNEL_PATCHVER)" || echo template ) 
+LINUX_CONFIG_DIR ?= ./config$(shell [ -d "./config-$(KERNEL_PATCHVER)" ] && printf -- "-$(KERNEL_PATCHVER)" || true )
+LINUX_CONFIG ?= $(LINUX_CONFIG_DIR)/default
 
--include $(TOPDIR)/target/linux/generic-$(KERNEL)/config-template
+-include $(GENERIC_LINUX_CONFIG)
 -include $(LINUX_CONFIG)
 
 ifneq ($(CONFIG_ATM),)
@@ -166,9 +168,9 @@ prepare: $(LINUX_DIR)/.configured
 compile: $(LINUX_DIR)/.modules
 menuconfig: $(LINUX_DIR)/.prepared FORCE
 	$(call Kernel/Configure)
-	$(SCRIPT_DIR)/config.pl '+' $(GENERIC_PLATFORM_DIR)/config-template $(LINUX_CONFIG) > $(LINUX_DIR)/.config
+	$(SCRIPT_DIR)/config.pl '+' $(GENERIC_LINUX_CONFIG) $(LINUX_CONFIG) > $(LINUX_DIR)/.config
 	$(MAKE) -C $(LINUX_DIR) $(KERNEL_MAKEOPTS) menuconfig
-	$(SCRIPT_DIR)/config.pl '>' $(GENERIC_PLATFORM_DIR)/config-template $(LINUX_DIR)/.config > $(LINUX_CONFIG)
+	$(SCRIPT_DIR)/config.pl '>' $(GENERIC_LINUX_CONFIG) $(LINUX_DIR)/.config > $(LINUX_CONFIG)
 
 install: $(LINUX_DIR)/.image
 
diff --git a/include/kernel-defaults.mk b/include/kernel-defaults.mk
index 84030a6d2d..6930a962a5 100644
--- a/include/kernel-defaults.mk
+++ b/include/kernel-defaults.mk
@@ -47,9 +47,9 @@ define Kernel/Configure/2.6
 endef
 define Kernel/Configure/Default
 	@if [ -f "./config/profile-$(PROFILE)" ]; then \
-		$(SCRIPT_DIR)/config.pl '+' $(GENERIC_PLATFORM_DIR)/config-template '+' $(LINUX_CONFIG) ./config/profile-$(PROFILE) > $(LINUX_DIR)/.config; \
+		$(SCRIPT_DIR)/config.pl '+' $(GENERIC_LINUX_CONFIG) '+' $(LINUX_CONFIG) ./config/profile-$(PROFILE) > $(LINUX_DIR)/.config; \
 	else \
-		$(SCRIPT_DIR)/config.pl '+' $(GENERIC_PLATFORM_DIR)/config-template $(LINUX_CONFIG) > $(LINUX_DIR)/.config; \
+		$(SCRIPT_DIR)/config.pl '+' $(GENERIC_LINUX_CONFIG) $(LINUX_CONFIG) > $(LINUX_DIR)/.config; \
 	fi
 	$(call Kernel/Configure/$(KERNEL))
 	rm -rf $(KERNEL_BUILD_DIR)/modules
diff --git a/include/kernel.mk b/include/kernel.mk
index 00a7882acb..e6fc3325e6 100644
--- a/include/kernel.mk
+++ b/include/kernel.mk
@@ -26,8 +26,11 @@ else
     KERNEL_CROSS:=$(TARGET_CROSS)
   endif
 
+  KERNEL_PATCHVER:=$(shell echo $(LINUX_VERSION) | cut -d. -f1,2,3 | cut -d- -f1)
   PLATFORM_DIR := $(TOPDIR)/target/linux/$(BOARD)-$(KERNEL)
+  PATCH_DIR := ./patches$(shell [ -d "./patches-$(KERNEL_PATCHVER)" ] && printf -- "-$(KERNEL_PATCHVER)" || true )
   GENERIC_PLATFORM_DIR := $(TOPDIR)/target/linux/generic-$(KERNEL)
+  GENERIC_PATCH_DIR := $(GENERIC_PLATFORM_DIR)/patches$(shell [ -d "$(GENERIC_PLATFORM_DIR)/patches-$(KERNEL_PATCHVER)" ] && printf -- "-$(KERNEL_PATCHVER)" || true )
   KERNEL_BUILD_DIR:=$(BUILD_DIR)/linux-$(KERNEL)-$(BOARD)
   LINUX_DIR := $(KERNEL_BUILD_DIR)/linux-$(LINUX_VERSION)
 
@@ -37,10 +40,11 @@ else
   LINUX_KERNEL:=$(KERNEL_BUILD_DIR)/vmlinux
 
   LINUX_SOURCE:=linux-$(LINUX_VERSION).tar.bz2
-  LINUX_SITE:=http://www.us.kernel.org/pub/linux/kernel/v$(KERNEL) \
-           http://www.us.kernel.org/pub/linux/kernel/v$(KERNEL) \
-           http://www.kernel.org/pub/linux/kernel/v$(KERNEL) \
-           http://www.de.kernel.org/pub/linux/kernel/v$(KERNEL)
+  TESTING:=$(if $(findstring -rc,$(LINUX_VERSION)),/testing,)
+  LINUX_SITE:=http://www.us.kernel.org/pub/linux/kernel/v$(KERNEL)$(TESTING) \
+           http://www.us.kernel.org/pub/linux/kernel/v$(KERNEL)$(TESTING) \
+           http://www.kernel.org/pub/linux/kernel/v$(KERNEL)$(TESTING) \
+           http://www.de.kernel.org/pub/linux/kernel/v$(KERNEL)$(TESTING)
 
   PKG_BUILD_DIR ?= $(KERNEL_BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
 
diff --git a/include/quilt.mk b/include/quilt.mk
index 395f64e174..e02fab0879 100644
--- a/include/quilt.mk
+++ b/include/quilt.mk
@@ -46,11 +46,11 @@ endif
 define Kernel/Patch/Default
 	if [ -d $(GENERIC_PLATFORM_DIR)/files ]; then $(CP) $(GENERIC_PLATFORM_DIR)/files/* $(LINUX_DIR)/; fi
 	if [ -d ./files ]; then $(CP) ./files/* $(LINUX_DIR)/; fi
-	$(if $(strip $(QUILT)),$(call Quilt/Patch,$(GENERIC_PLATFORM_DIR)/patches,generic/), \
-		if [ -d $(GENERIC_PLATFORM_DIR)/patches ]; then $(PATCH) $(LINUX_DIR) $(GENERIC_PLATFORM_DIR)/patches; fi \
+	$(if $(strip $(QUILT)),$(call Quilt/Patch,$(GENERIC_PATCH_DIR),generic/), \
+		if [ -d $(GENERIC_PATCH_DIR) ]; then $(PATCH) $(LINUX_DIR) $(GENERIC_PATCH_DIR); fi \
 	)
-	$(if $(strip $(QUILT)),$(call Quilt/Patch,./patches,platform/), \
-		if [ -d ./patches ]; then $(PATCH) $(LINUX_DIR) ./patches; fi \
+	$(if $(strip $(QUILT)),$(call Quilt/Patch,$(PATCH_DIR),platform/), \
+		if [ -d $(PATCH_DIR) ]; then $(PATCH) $(LINUX_DIR) $(PATCH_DIR); fi \
 	)
 	$(if $(strip $(QUILT)),touch $(PKG_BUILD_DIR)/.quilt_used)
 endef
@@ -79,8 +79,8 @@ define Quilt/Refresh/Kernel
 		echo "All kernel patches must start with either generic/ or platform/"; \
 		false; \
 	}
-	$(call Quilt/RefreshDir,$(GENERIC_PLATFORM_DIR)/patches,generic/)
-	$(call Quilt/RefreshDir,./patches,platform/)
+	$(call Quilt/RefreshDir,$(GENERIC_PATCH_DIR),generic/)
+	$(call Quilt/RefreshDir,$(PATCH_DIR),platform/)
 endef
 
 quilt-check: $(STAMP_PREPARED) FORCE
diff --git a/target/linux/adm5120-2.6/config/default b/target/linux/adm5120-2.6/config/default
index 2ba65db742..227eacc9b0 100644
--- a/target/linux/adm5120-2.6/config/default
+++ b/target/linux/adm5120-2.6/config/default
@@ -3,6 +3,7 @@ CONFIG_32BIT=y
 # CONFIG_64BIT_PHYS_ADDR is not set
 CONFIG_ADM5120_GPIO=y
 CONFIG_ADM5120_NR_UARTS=2
+# CONFIG_ATM_DRIVERS is not set
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
 CONFIG_BASE_SMALL=0
@@ -62,7 +63,7 @@ CONFIG_GENERIC_GPIO=y
 # CONFIG_GEN_RTC is not set
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
-CONFIG_HID=y
+CONFIG_HID=m
 CONFIG_HWMON=y
 # CONFIG_HWMON_DEBUG_CHIP is not set
 CONFIG_HW_HAS_PCI=y
@@ -96,6 +97,7 @@ CONFIG_JFFS2_SUMMARY=y
 # CONFIG_JOLIET is not set
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_MACH_ALCHEMY is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
@@ -175,7 +177,9 @@ CONFIG_MTD_NAND=y
 # CONFIG_MTD_NAND_DISKONCHIP is not set
 # CONFIG_MTD_NAND_ECC_SMC is not set
 CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
 # CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
 CONFIG_MTD_NAND_RB100=y
 # CONFIG_MTD_NAND_VERIFY_WRITE is not set
 # CONFIG_MTD_OBSOLETE_CHIPS is not set
@@ -199,6 +203,7 @@ CONFIG_NET_KEY=y
 # CONFIG_NET_PKTGEN is not set
 CONFIG_NET_SCH_FIFO=y
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NETDEV_1000 is not set
 # CONFIG_PAGE_SIZE_16KB is not set
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_64KB is not set
diff --git a/target/linux/adm5120eb-2.6/config/default b/target/linux/adm5120eb-2.6/config/default
index 8a8fd9afe3..da6c484cf4 100644
--- a/target/linux/adm5120eb-2.6/config/default
+++ b/target/linux/adm5120eb-2.6/config/default
@@ -3,6 +3,7 @@ CONFIG_32BIT=y
 # CONFIG_64BIT_PHYS_ADDR is not set
 CONFIG_ADM5120_GPIO=y
 CONFIG_ADM5120_NR_UARTS=2
+# CONFIG_ATM_DRIVERS is not set
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
 CONFIG_BASE_SMALL=0
@@ -60,9 +61,9 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_GPIO=y
 # CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
 # CONFIG_GEN_RTC is not set
+CONFIG_HID=m
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
-CONFIG_HID=y
 CONFIG_HWMON=y
 # CONFIG_HWMON_DEBUG_CHIP is not set
 CONFIG_HW_HAS_PCI=y
@@ -96,6 +97,7 @@ CONFIG_JFFS2_SUMMARY=y
 # CONFIG_JOLIET is not set
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_MACH_ALCHEMY is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
@@ -194,6 +196,7 @@ CONFIG_NET_KEY=y
 # CONFIG_NET_PKTGEN is not set
 CONFIG_NET_SCH_FIFO=y
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NETDEV_1000 is not set
 # CONFIG_PAGE_SIZE_16KB is not set
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_64KB is not set
diff --git a/target/linux/amcc-2.6/config/default b/target/linux/amcc-2.6/config/default
index 7bec44d78c..4edff5b9d5 100644
--- a/target/linux/amcc-2.6/config/default
+++ b/target/linux/amcc-2.6/config/default
@@ -4,6 +4,7 @@ CONFIG_4xx=y
 # CONFIG_6xx is not set
 # CONFIG_8139TOO is not set
 # CONFIG_8xx is not set
+# CONFIG_ATM_DRIVERS is not set
 # CONFIG_BAMBOO is not set
 # CONFIG_E200 is not set
 # CONFIG_E500 is not set
@@ -65,6 +66,7 @@ CONFIG_JFFS2_FS_DEBUG=0
 CONFIG_KERNEL_START=0xc0000000
 # CONFIG_KEXEC is not set
 CONFIG_LOWMEM_SIZE=0x30000000
+# CONFIG_MACINTOSH_DRIVERS is not set
 CONFIG_MATH_EMULATION=y
 CONFIG_MINI_FO=y
 CONFIG_MTD=y
@@ -123,6 +125,7 @@ CONFIG_MTD_SPLIT_ROOTFS=y
 # CONFIG_NATSEMI is not set
 # CONFIG_NE2K_PCI is not set
 # CONFIG_NET_VENDOR_3COM is not set
+CONFIG_NETDEV_1000=y
 CONFIG_NOT_COHERENT_CACHE=y
 # CONFIG_NVRAM is not set
 # CONFIG_PCIPCWATCHDOG is not set
diff --git a/target/linux/ar7-2.6/config/default b/target/linux/ar7-2.6/config/default
index 535ae79a8c..3092078a29 100644
--- a/target/linux/ar7-2.6/config/default
+++ b/target/linux/ar7-2.6/config/default
@@ -6,6 +6,7 @@ CONFIG_AR7_GPIO=y
 CONFIG_AR7_WDT=y
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ATM_DRIVERS=y
 # CONFIG_ATMEL is not set
 CONFIG_BASE_SMALL=0
 # CONFIG_BCM43XX is not set
@@ -73,6 +74,7 @@ CONFIG_JFFS2_FS_DEBUG=0
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_LEDS_AR7=y
+# CONFIG_MACH_ALCHEMY is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
@@ -159,6 +161,7 @@ CONFIG_NEED_MULTIPLE_NODES=y
 # CONFIG_NET_PCI is not set
 CONFIG_NET_SCH_FIFO=y
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NETDEV_1000 is not set
 CONFIG_NODES_SHIFT=6
 # CONFIG_PAGE_SIZE_16KB is not set
 CONFIG_PAGE_SIZE_4KB=y
diff --git a/target/linux/aruba-2.6/config/default b/target/linux/aruba-2.6/config/default
index 2b14b56aef..f1fe27d04a 100644
--- a/target/linux/aruba-2.6/config/default
+++ b/target/linux/aruba-2.6/config/default
@@ -3,6 +3,7 @@ CONFIG_32BIT=y
 # CONFIG_64BIT_PHYS_ADDR is not set
 # CONFIG_8139TOO is not set
 CONFIG_AR2313=y
+# CONFIG_ATM_DRIVERS is not set
 # CONFIG_B44 is not set
 CONFIG_BASE_SMALL=0
 CONFIG_CPU_BIG_ENDIAN=y
@@ -63,6 +64,7 @@ CONFIG_INITRAMFS_SOURCE=""
 # CONFIG_IPW2200 is not set
 CONFIG_JFFS2_FS_DEBUG=0
 # CONFIG_LAN_SAA9730 is not set
+# CONFIG_MACH_ALCHEMY is not set
 CONFIG_MACH_ARUBA=y
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
@@ -155,6 +157,7 @@ CONFIG_MTD_SPLIT_ROOTFS=y
 CONFIG_NATSEMI=y
 # CONFIG_NE2K_PCI is not set
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NETDEV_1000 is not set
 # CONFIG_PAGE_SIZE_16KB is not set
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_64KB is not set
@@ -191,6 +194,7 @@ CONFIG_SYS_HAS_CPU_MIPS32_R1=y
 CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
 CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
 CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
+# CONFIG_TC35815 is not set
 # CONFIG_TOSHIBA_JMR3927 is not set
 # CONFIG_TOSHIBA_RBTX4927 is not set
 # CONFIG_TOSHIBA_RBTX4938 is not set
diff --git a/target/linux/atheros-2.6/config/default b/target/linux/atheros-2.6/config/default
index 62a642fec6..497f149c48 100644
--- a/target/linux/atheros-2.6/config/default
+++ b/target/linux/atheros-2.6/config/default
@@ -60,6 +60,7 @@ CONFIG_HW_RANDOM=y
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_IRQ_CPU=y
 CONFIG_JFFS2_FS_DEBUG=0
+# CONFIG_MACH_ALCHEMY is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
@@ -149,6 +150,7 @@ CONFIG_MTD_REDBOOT_PARTS_READONLY=y
 # CONFIG_MTD_SLRAM is not set
 CONFIG_MTD_SPIFLASH=y
 CONFIG_NET_SCH_FIFO=y
+# CONFIG_NETDEV_1000 is not set
 # CONFIG_PAGE_SIZE_16KB is not set
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_64KB is not set
diff --git a/target/linux/au1000-2.6/config/default b/target/linux/au1000-2.6/config/default
index 09afbd9cbd..91118e5f3e 100644
--- a/target/linux/au1000-2.6/config/default
+++ b/target/linux/au1000-2.6/config/default
@@ -77,6 +77,7 @@ CONFIG_INITRAMFS_SOURCE=""
 CONFIG_JFFS2_FS_DEBUG=0
 CONFIG_LEDS_MTX1=y
 # CONFIG_LEDS_TRIGGERS is not set
+CONFIG_MACH_ALCHEMY=y
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
@@ -164,6 +165,7 @@ CONFIG_MTX1_WATCHDOG=y
 # CONFIG_NE2K_PCI is not set
 CONFIG_NET_SCH_FIFO=y
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NETDEV_1000 is not set
 CONFIG_NF_CONNTRACK=y
 CONFIG_NF_CONNTRACK_MARK=y
 CONFIG_NF_CT_ACCT=y
@@ -228,6 +230,7 @@ CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
 CONFIG_SYS_SUPPORTS_KGDB=y
 CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
 # CONFIG_TMD_HERMES is not set
+# CONFIG_TC35815 is not set
 # CONFIG_TOSHIBA_JMR3927 is not set
 # CONFIG_TOSHIBA_RBTX4927 is not set
 # CONFIG_TOSHIBA_RBTX4938 is not set
@@ -244,5 +247,6 @@ CONFIG_USB_EHCI_HCD=m
 CONFIG_USB_OHCI_HCD=m
 CONFIG_USB_UHCI_HCD=m
 # CONFIG_VIA_RHINE is not set
+CONFIG_WDT_MTX1=y
 CONFIG_ZONE_DMA=y
 CONFIG_ZONE_DMA_FLAG=1
diff --git a/target/linux/avr32-2.6/config/default b/target/linux/avr32-2.6/config/default
index ad6a47b458..5bd7078c94 100644
--- a/target/linux/avr32-2.6/config/default
+++ b/target/linux/avr32-2.6/config/default
@@ -2,6 +2,7 @@ CONFIG_AP7000_16_BIT_SMC=y
 # CONFIG_AP7000_32_BIT_SMC is not set
 # CONFIG_AP7000_8_BIT_SMC is not set
 # CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+# CONFIG_ATM_DRIVERS is not set
 CONFIG_AVR32=y
 CONFIG_BOARD_ATNGW100=y
 # CONFIG_BOARD_ATSTK1000 is not set
@@ -52,6 +53,7 @@ CONFIG_MTD_MAP_BANK_WIDTH_4=y
 # CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
 # CONFIG_MTD_MTDRAM is not set
 # CONFIG_MTD_NAND is not set
+# CONFIG_NETDEV_1000 is not set
 # CONFIG_MTD_OBSOLETE_CHIPS is not set
 # CONFIG_MTD_ONENAND is not set
 CONFIG_MTD_PARTITIONS=y
@@ -80,6 +82,7 @@ CONFIG_SPI_ATMEL=y
 # CONFIG_SPI_BITBANG is not set
 # CONFIG_SPI_DEBUG is not set
 CONFIG_SPI_MASTER=y
+# CONFIG_SPI_SPIDEV is not set
 CONFIG_SPI=y
 CONFIG_SUBARCH_AVR32B=y
 # CONFIG_UNUSED_SYMBOLS is not set
diff --git a/target/linux/brcm47xx-2.6/config/default b/target/linux/brcm47xx-2.6/config/default
index b722f09102..38e1137c0c 100644
--- a/target/linux/brcm47xx-2.6/config/default
+++ b/target/linux/brcm47xx-2.6/config/default
@@ -5,6 +5,7 @@ CONFIG_32BIT=y
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
 # CONFIG_ARPD is not set
+# CONFIG_ATM_DRIVERS is not set
 # CONFIG_ATMEL is not set
 CONFIG_B44=y
 CONFIG_BASE_SMALL=0
@@ -117,6 +118,7 @@ CONFIG_IRQ_CPU=y
 CONFIG_JFFS2_FS_DEBUG=0
 # CONFIG_LIBCRC32C is not set
 # CONFIG_LLC2 is not set
+# CONFIG_MACH_ALCHEMY is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
@@ -198,6 +200,7 @@ CONFIG_MTD_PARTITIONS=y
 # CONFIG_MTD_SLRAM is not set
 # CONFIG_NATSEMI is not set
 # CONFIG_NE2K_PCI is not set
+CONFIG_NETDEV_1000=y
 # CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
 # CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
 # CONFIG_NETFILTER_XT_MATCH_DCCP is not set
@@ -260,6 +263,7 @@ CONFIG_SYS_HAS_CPU_MIPS32_R1=y
 CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
 CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
 CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
+# CONFIG_TC35815 is not set
 # CONFIG_TOSHIBA_JMR3927 is not set
 # CONFIG_TOSHIBA_RBTX4927 is not set
 # CONFIG_TOSHIBA_RBTX4938 is not set
diff --git a/target/linux/brcm63xx-2.6/config/default b/target/linux/brcm63xx-2.6/config/default
index 9a29daf37c..abef335ed5 100644
--- a/target/linux/brcm63xx-2.6/config/default
+++ b/target/linux/brcm63xx-2.6/config/default
@@ -4,6 +4,7 @@ CONFIG_32BIT=y
 # CONFIG_8139TOO is not set
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ATM_DRIVERS=y
 CONFIG_AUDIT=y
 CONFIG_AUDIT_GENERIC=y
 CONFIG_BASE_SMALL=0
@@ -116,14 +117,7 @@ CONFIG_HZ_250=y
 # CONFIG_IDE is not set
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
-CONFIG_INPUT=m
-# CONFIG_INPUT_EVDEV is not set
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_INPUT_MOUSE=y
-CONFIG_INPUT_MOUSEDEV=m
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT is not set
 CONFIG_IOSCHED_CFQ=y
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IP6_NF_MATCH_FRAG is not set
@@ -165,10 +159,12 @@ CONFIG_LBD=y
 # CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
 # CONFIG_LLC2 is not set
 CONFIG_LXT_PHY=m
+# CONFIG_MACH_ALCHEMY is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
 # CONFIG_MAC_PARTITION is not set
+# CONFIG_MAC80211_DEBUGFS is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_MARVELL_PHY=m
 CONFIG_MII=m
@@ -206,9 +202,6 @@ CONFIG_MODVERSIONS=y
 # CONFIG_MOMENCO_OCELOT_3 is not set
 # CONFIG_MOMENCO_OCELOT_C is not set
 # CONFIG_MOMENCO_OCELOT_G is not set
-CONFIG_MOUSE_PS2=m
-# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_VSXXXAA is not set
 CONFIG_MTD=y
 # CONFIG_MTD_ABSENT is not set
 CONFIG_MTD_BCM963XX=y
@@ -277,6 +270,7 @@ CONFIG_NET_SCH_CLK_GETTIMEOFDAY=y
 # CONFIG_NET_SCH_CLK_JIFFIES is not set
 CONFIG_NET_SCH_FIFO=y
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NETDEV_1000 is not set
 CONFIG_NLS=y
 CONFIG_NLS_ASCII=m
 # CONFIG_PAGE_SIZE_16KB is not set
@@ -332,6 +326,7 @@ CONFIG_SYS_HAS_CPU_MIPS32_R1=y
 CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
 CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
 CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
+# CONFIG_TC35815 is not set
 CONFIG_TCP_CONG_BIC=y
 # CONFIG_TCP_CONG_HSTCP is not set
 # CONFIG_TCP_CONG_HYBLA is not set
diff --git a/target/linux/generic-2.6/config-2.6.22 b/target/linux/generic-2.6/config-2.6.22
new file mode 100644
index 0000000000..5fac0a48f5
--- /dev/null
+++ b/target/linux/generic-2.6/config-2.6.22
@@ -0,0 +1,1597 @@
+# CONFIG_6PACK is not set
+# CONFIG_8139CP is not set
+# CONFIG_9P_FS is not set
+# CONFIG_ACENIC is not set
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_AIRO=m
+CONFIG_AIRO_CS=m
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_AMIGA_PARTITION is not set
+CONFIG_ANON_INODES=y
+# CONFIG_APPLICOM is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_ARCNET is not set
+CONFIG_ARPD=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_ATA is not set
+# CONFIG_ATALK is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_ATL1 is not set
+CONFIG_ATM=m
+CONFIG_ATMEL=m
+# CONFIG_ATM_AMBASSADOR is not set
+CONFIG_ATM_BR2684=m
+CONFIG_ATM_BR2684_IPFILTER=y
+CONFIG_ATM_CLIP=m
+CONFIG_ATM_CLIP_NO_ICMP=y
+CONFIG_ATM_DUMMY=m
+# CONFIG_ATM_ENI is not set
+# CONFIG_ATM_FIRESTREAM is not set
+# CONFIG_ATM_FORE200E_MAYBE is not set
+# CONFIG_ATM_HE is not set
+# CONFIG_ATM_HORIZON is not set
+# CONFIG_ATM_IA is not set
+# CONFIG_ATM_IDT77252 is not set
+# CONFIG_ATM_LANAI is not set
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+# CONFIG_ATM_NICSTAR is not set
+CONFIG_ATM_TCP=m
+# CONFIG_ATM_ZATM is not set
+# CONFIG_AUDIT is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AX25=m
+# CONFIG_AX25_DAMA_SLAVE is not set
+# CONFIG_B44 is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+CONFIG_BASE_FULL=y
+# CONFIG_BASLER_EXCITE is not set
+# CONFIG_BAYCOM_SER_FDX is not set
+# CONFIG_BAYCOM_SER_HDX is not set
+CONFIG_BCM43XX=m
+CONFIG_BCM43XX_DEBUG=y
+CONFIG_BCM43XX_DMA=y
+CONFIG_BCM43XX_DMA_AND_PIO_MODE=y
+# CONFIG_BCM43XX_DMA_MODE is not set
+CONFIG_BCM43XX_PIO=y
+# CONFIG_BCM43XX_PIO_MODE is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_BLINK is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_ATIIXP is not set
+# CONFIG_BLK_DEV_CMD640 is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_CS5535 is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_DELKIN is not set
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_IDEPNP is not set
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_BLK_DEV_IT8213 is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_JMICRON is not set
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_NBD=m
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_OFFBOARD is not set
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_BLK_DEV_RZ1000 is not set
+CONFIG_BLK_DEV_SD=m
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SIS5513 is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_TC86C001 is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_UB is not set
+# CONFIG_BLK_DEV_UMEM is not set
+CONFIG_BLOCK=y
+# CONFIG_BNX2 is not set
+CONFIG_BONDING=m
+# CONFIG_BPQETHER is not set
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_NETFILTER=y
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_BT=m
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+# CONFIG_BT_CMTP is not set
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBFUSB=m
+# CONFIG_BT_HCIBLUECARD is not set
+CONFIG_BT_HCIBPA10X=m
+# CONFIG_BT_HCIBT3C is not set
+# CONFIG_BT_HCIBTUART is not set
+# CONFIG_BT_HCIDTL1 is not set
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUSB=m
+CONFIG_BT_HCIUSB_SCO=y
+CONFIG_BT_HCIVHCI=m
+CONFIG_BT_HIDP=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_SCO=m
+CONFIG_BUG=y
+# CONFIG_CAPI_AVM is not set
+# CONFIG_CAPI_EICON is not set
+# CONFIG_CAPI_TRACE is not set
+CONFIG_CARDBUS=y
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_CASSINI is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_CDROM_PKTCDVD is not set
+CONFIG_CFG80211=m
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CICADA_PHY is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+CONFIG_CIFS_POSIX=y
+CONFIG_CIFS_STATS=y
+# CONFIG_CIFS_STATS2 is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+CONFIG_CLS_U32_MARK=y
+CONFIG_CLS_U32_PERF=y
+CONFIG_CMDLINE=""
+# CONFIG_CODA_FS is not set
+CONFIG_CONFIGFS_FS=y
+# CONFIG_CONNECTOR is not set
+# CONFIG_CRAMFS is not set
+CONFIG_CRC16=m
+CONFIG_CRC32=y
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CROSSCOMPILE=y
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLOWFISH=m
+# CONFIG_CRYPTO_CAMELLIA is not set
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_KHAZAD=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_NULL=m
+# CONFIG_CRYPTO_PCBC is not set
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+CONFIG_CRYPTO_WP512=m
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_DAB is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DECNET is not set
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_BIC is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_CUBIC is not set
+CONFIG_DEFAULT_DEADLINE=y
+# CONFIG_DEFAULT_HTCP is not set
+CONFIG_DEFAULT_IOSCHED="deadline"
+# CONFIG_DEFAULT_NOOP is not set
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="vegas"
+CONFIG_DEFAULT_VEGAS=y
+# CONFIG_DEFAULT_WESTWOOD is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+# CONFIG_DEVFS_DEBUG is not set
+CONFIG_DEVFS_FS=y
+CONFIG_DEVFS_MOUNT=y
+# CONFIG_DGRS is not set
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_DL2K is not set
+# CONFIG_DLM is not set
+# CONFIG_DMA_ENGINE is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_DRM is not set
+# CONFIG_DTLK is not set
+# CONFIG_DUMMY is not set
+# CONFIG_DVB is not set
+# CONFIG_DVB_CORE is not set
+CONFIG_E100=m
+# CONFIG_E1000 is not set
+# CONFIG_ECONET is not set
+# CONFIG_EEPRO100 is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_ELF_CORE is not set
+CONFIG_EMBEDDED=y
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_EPIC100 is not set
+CONFIG_EPOLL=y
+# CONFIG_EQUALIZER is not set
+CONFIG_EVENTFD=y
+CONFIG_EXPERIMENTAL=y
+CONFIG_EXPORTFS=m
+CONFIG_EXT2_FS=m
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=m
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_FAT_FS=m
+# CONFIG_FB is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FDDI is not set
+# CONFIG_FEALNX is not set
+CONFIG_FIB_RULES=y
+# CONFIG_FIREWIRE is not set
+CONFIG_FLATMEM=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_FORCEDETH is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_FTL is not set
+# CONFIG_FUSE_FS is not set
+# CONFIG_FUSION is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
+# CONFIG_FUSION_SPI is not set
+CONFIG_FUTEX=y
+CONFIG_FW_LOADER=y
+CONFIG_GACT_PROB=y
+# CONFIG_GAMEPORT is not set
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_TIME=y
+# CONFIG_GFS2_FS is not set
+# CONFIG_HAMACHI is not set
+CONFIG_HAMRADIO=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_HERMES=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_HFS_FS=m
+# CONFIG_HID_DEBUG is not set
+# CONFIG_HID_FF is not set
+CONFIG_HIGH_RES_TIMERS=y
+# CONFIG_HIPPI is not set
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_CS=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PCI=m
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOTPLUG=y
+# CONFIG_HOTPLUG_PCI is not set
+# CONFIG_HP100 is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
+CONFIG_HZ=100
+CONFIG_HZ_100=y
+# CONFIG_HZ_1000 is not set
+# CONFIG_HZ_1024 is not set
+# CONFIG_HZ_128 is not set
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_256 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_48 is not set
+# CONFIG_I2C_ALGOPCA is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_ELEKTOR is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PCA_ISA is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_TINY_USB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2O is not set
+# CONFIG_I82092 is not set
+# CONFIG_IEEE1394 is not set
+CONFIG_IEEE80211=m
+CONFIG_IEEE80211_CRYPT_CCMP=m
+CONFIG_IEEE80211_CRYPT_TKIP=m
+CONFIG_IEEE80211_CRYPT_WEP=m
+# CONFIG_IEEE80211_DEBUG is not set
+CONFIG_IEEE80211_SOFTMAC=m
+# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
+CONFIG_IFB=m
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IMQ=m
+# CONFIG_IMQ_BEHAVIOR_AA is not set
+# CONFIG_IMQ_BEHAVIOR_AB is not set
+CONFIG_IMQ_BEHAVIOR_BA=y
+# CONFIG_IMQ_BEHAVIOR_BB is not set
+CONFIG_IMQ_NUM_DEVS=2
+CONFIG_INET=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET_AH=m
+CONFIG_INET_DCCP_DIAG=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TCP_DIAG=m
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_TUNNEL=m
+# CONFIG_INFINIBAND is not set
+# CONFIG_INFTL is not set
+CONFIG_INIT_ENV_ARG_LIMIT=32
+# CONFIG_INOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_INPUT is not set
+# CONFIG_INPUT_EVBUG is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MISC is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+# CONFIG_IP6_NF_MATCH_MH is not set
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_OWNER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_TARGET_IMQ=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_ROUTE=m
+# CONFIG_IPC_NS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_IPSEC_NAT_TRAVERSAL=y
+CONFIG_IPV6=m
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_IPV6_PRIVACY is not set
+CONFIG_IPV6_ROUTER_PREF=y
+# CONFIG_IPV6_ROUTE_INFO is not set
+CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_TUNNEL is not set
+CONFIG_IPW2100=m
+# CONFIG_IPW2100_DEBUG is not set
+CONFIG_IPW2100_MONITOR=y
+CONFIG_IPW2200=m
+# CONFIG_IPW2200_DEBUG is not set
+CONFIG_IPW2200_MONITOR=y
+# CONFIG_IPW2200_PROMISCUOUS is not set
+# CONFIG_IPW2200_QOS is not set
+# CONFIG_IPW2200_RADIOTAP is not set
+# CONFIG_IPX is not set
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_DCCP=m
+CONFIG_IP_DCCP_ACKVEC=y
+CONFIG_IP_DCCP_CCID2=m
+# CONFIG_IP_DCCP_CCID2_DEBUG is not set
+CONFIG_IP_DCCP_CCID3=m
+# CONFIG_IP_DCCP_CCID3_DEBUG is not set
+CONFIG_IP_DCCP_CCID3_RTO=100
+CONFIG_IP_DCCP_TFRC_LIB=m
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+# CONFIG_IP_MROUTE is not set
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_NF_CONNTRACK=y
+# CONFIG_IP_NF_CONNTRACK_EVENTS is not set
+CONFIG_IP_NF_CONNTRACK_MARK=y
+# CONFIG_IP_NF_CONNTRACK_SUPPORT is not set
+CONFIG_IP_NF_CT_ACCT=y
+CONFIG_IP_NF_CT_PROTO_SCTP=m
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_H323=m
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_HASHLIMIT=m
+CONFIG_IP_NF_MATCH_IPP2P=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_LAYER7=m
+# CONFIG_IP_NF_MATCH_LAYER7_DEBUG is not set
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_SET=m
+CONFIG_IP_NF_MATCH_TIME=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_H323=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_NAT_PPTP=m
+CONFIG_IP_NF_NAT_SIP=m
+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
+CONFIG_IP_NF_NAT_TFTP=m
+# CONFIG_IP_NF_NETBIOS_NS is not set
+CONFIG_IP_NF_PPTP=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SET=m
+CONFIG_IP_NF_SET_HASHSIZE=1024
+CONFIG_IP_NF_SET_IPHASH=m
+CONFIG_IP_NF_SET_IPMAP=m
+CONFIG_IP_NF_SET_IPPORTHASH=m
+CONFIG_IP_NF_SET_IPTREE=m
+CONFIG_IP_NF_SET_MACIPMAP=m
+CONFIG_IP_NF_SET_MAX=256
+CONFIG_IP_NF_SET_NETHASH=m
+CONFIG_IP_NF_SET_PORTMAP=m
+CONFIG_IP_NF_SIP=m
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_IMQ=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_ROUTE=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_TARGET_SET=m
+CONFIG_IP_NF_TARGET_TCPMSS=y
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TFTP=m
+# CONFIG_IP_PNP is not set
+CONFIG_IP_ROUTE_FWMARK=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_MULTIPATH_CACHED=y
+CONFIG_IP_ROUTE_MULTIPATH_DRR=m
+CONFIG_IP_ROUTE_MULTIPATH_RANDOM=m
+CONFIG_IP_ROUTE_MULTIPATH_RR=m
+CONFIG_IP_ROUTE_MULTIPATH_WRANDOM=m
+CONFIG_IP_ROUTE_VERBOSE=y
+# CONFIG_IP_SCTP is not set
+# CONFIG_IP_VS is not set
+# CONFIG_IRDA is not set
+# CONFIG_ISCSI_TCP is not set
+CONFIG_ISDN=m
+CONFIG_ISDN_CAPI=m
+CONFIG_ISDN_CAPI_CAPI20=m
+CONFIG_ISDN_CAPI_CAPIFS=m
+CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
+CONFIG_ISDN_CAPI_MIDDLEWARE=y
+# CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON is not set
+# CONFIG_ISDN_I4L is not set
+CONFIG_ISO9660_FS=m
+# CONFIG_IXGB is not set
+CONFIG_JBD=m
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_XATTR is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_SUMMARY is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFS_DEBUG is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_SECURITY is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_JOLIET=y
+# CONFIG_KALLSYMS is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_KEXEC is not set
+# CONFIG_KEYS is not set
+# CONFIG_KMOD is not set
+# CONFIG_LAPB is not set
+# CONFIG_LASAT is not set
+# CONFIG_LBD is not set
+# CONFIG_LDM_PARTITION is not set
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_LIBCRC32C=m
+# CONFIG_LIBERTAS_USB is not set
+CONFIG_LLC=y
+CONFIG_LLC2=m
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_LOCKD=m
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_LOCKD_V4=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_LSF is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_MAC80211_DEBUG is not set
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211=m
+# CONFIG_MAC_EMUMOUSEBTN is not set
+CONFIG_MAC_PARTITION=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_MARKEINS is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_MD is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_MFD_SM501 is not set
+CONFIG_MII=y
+CONFIG_MINIX_FS=m
+# CONFIG_MINIX_SUBPARTITION is not set
+CONFIG_MINI_FO=y
+CONFIG_MKISS=m
+# CONFIG_MMC is not set
+CONFIG_MMU=y
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+CONFIG_MSDOS_FS=m
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_MTD_NAND is not set
+CONFIG_MTD_SPLIT_ROOTFS=y
+# CONFIG_MTD_UBI is not set
+# CONFIG_MYRI10GE is not set
+# CONFIG_NCP_FS is not set
+CONFIG_NET=y
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETDEBUG is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEV_10000 is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+# CONFIG_NETFILTER_NETLINK is not set
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PORTSCAN=m
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=y
+CONFIG_NETFILTER_XT_TARGET_CHAOS=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_DELUDE=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+# CONFIG_NETPOLL is not set
+# CONFIG_NETROM is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETXEN_NIC is not set
+CONFIG_NET_ACT_GACT=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_POLICE=m
+# CONFIG_NET_ACT_SIMP is not set
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_IND=y
+CONFIG_NET_CLS_POLICE=y
+CONFIG_NET_CLS_ROUTE=y
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_STACK=32
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_ESTIMATOR=y
+CONFIG_NET_ETHERNET=y
+# CONFIG_NET_FC is not set
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_KEY=m
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_NET_PCI=y
+# CONFIG_NET_PCMCIA is not set
+CONFIG_NET_PKTGEN=m
+# CONFIG_NET_POLL_CONTROLLER is not set
+CONFIG_NET_RADIO=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_ATM=m
+CONFIG_NET_SCH_CBQ=m
+# CONFIG_NET_SCH_CLK_CPU is not set
+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
+CONFIG_NET_SCH_CLK_JIFFIES=y
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_ESFQ=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_TEQL=m
+# CONFIG_NET_TULIP is not set
+CONFIG_NET_WIRELESS=y
+CONFIG_NET_WIRELESS_RTNETLINK=y
+CONFIG_NEW_LEDS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_TCP=y
+# CONFIG_NFSD_V2_ACL is not set
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+CONFIG_NFSD_V4=y
+# CONFIG_NFS_ACL_SUPPORT is not set
+CONFIG_NFS_COMMON=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFTL is not set
+CONFIG_NF_CONNTRACK=y
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+CONFIG_NF_CONNTRACK_ENABLED=y
+# CONFIG_NF_CONNTRACK_EVENTS is not set
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IPV4=y
+# CONFIG_NF_CONNTRACK_IPV6 is not set
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_MARK=y
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_PPTP is not set
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+# CONFIG_NF_CONNTRACK_SANE is not set
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_SUPPORT=y
+# CONFIG_NF_CONNTRACK_TFTP is not set
+CONFIG_NF_CT_ACCT=y
+# CONFIG_NF_CT_PROTO_SCTP is not set
+CONFIG_NF_NAT=y
+# CONFIG_NF_NAT_AMANDA is not set
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_H323=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_NEEDED=y
+# CONFIG_NF_NAT_PPTP is not set
+CONFIG_NF_NAT_SIP=m
+# CONFIG_NF_NAT_SNMP_BASIC is not set
+# CONFIG_NF_NAT_TFTP is not set
+CONFIG_NLS=m
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_CODEPAGE_1250=m
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=m
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+CONFIG_NLS_ISO8859_15=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+CONFIG_NLS_KOI8_R=m
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+CONFIG_NO_HZ=y
+CONFIG_NORTEL_HERMES=m
+# CONFIG_NS83820 is not set
+# CONFIG_NTFS_DEBUG is not set
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+# CONFIG_OCFS2_FS is not set
+# CONFIG_OSF_PARTITION is not set
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+# CONFIG_PARPORT is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CS5535 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_ISAPNP is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PCMCIA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+CONFIG_PATA_PLATFORM=m
+# CONFIG_PATA_QDI is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+# CONFIG_PATA_WINBOND_VLB is not set
+# CONFIG_PC300TOO is not set
+# CONFIG_PCCARD is not set
+CONFIG_PCI=y
+CONFIG_PCI_ATMEL=m
+CONFIG_PCI_HERMES=m
+# CONFIG_PCMCIA is not set
+# CONFIG_PCMCIA_AHA152X is not set
+# CONFIG_PCMCIA_DEBUG is not set
+# CONFIG_PCMCIA_FDOMAIN is not set
+# CONFIG_PCMCIA_HERMES is not set
+# CONFIG_PCMCIA_IOCTL is not set
+# CONFIG_PCMCIA_LOAD_CIS is not set
+# CONFIG_PCMCIA_NETWAVE is not set
+# CONFIG_PCMCIA_NINJA_SCSI is not set
+# CONFIG_PCMCIA_QLOGIC is not set
+# CONFIG_PCMCIA_RAYCS is not set
+# CONFIG_PCMCIA_SPECTRUM is not set
+# CONFIG_PCMCIA_SYM53C500 is not set
+# CONFIG_PCMCIA_WAVELAN is not set
+# CONFIG_PCMCIA_WL3501 is not set
+# CONFIG_PCNET32 is not set
+# CONFIG_PD6729 is not set
+# CONFIG_PHANTOM is not set
+# CONFIG_PHONE is not set
+# CONFIG_PHYLIB is not set
+CONFIG_PLIST=y
+CONFIG_PLX_HERMES=m
+# CONFIG_PM is not set
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_PPP=m
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_PREEMPT is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_PRINTK=y
+# CONFIG_PRINTK_TIME is not set
+CONFIG_PRISM54=m
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+# CONFIG_PROFILING is not set
+# CONFIG_QEMU is not set
+# CONFIG_QLA3XXX is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_R3964 is not set
+# CONFIG_R8169 is not set
+# CONFIG_RADIO_ADAPTERS is not set
+# CONFIG_RADIO_AZTECH is not set
+# CONFIG_RADIO_CADET is not set
+# CONFIG_RADIO_GEMTEK is not set
+# CONFIG_RADIO_GEMTEK_PCI is not set
+# CONFIG_RADIO_MAESTRO is not set
+# CONFIG_RADIO_MAXIRADIO is not set
+# CONFIG_RADIO_RTRACK is not set
+# CONFIG_RADIO_RTRACK2 is not set
+# CONFIG_RADIO_SF16FMI is not set
+# CONFIG_RADIO_SF16FMR2 is not set
+# CONFIG_RADIO_TERRATEC is not set
+# CONFIG_RADIO_TRUST is not set
+# CONFIG_RADIO_TYPHOON is not set
+# CONFIG_RADIO_ZOLTRIX is not set
+# CONFIG_RAID_ATTRS is not set
+CONFIG_RAMFS=y
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_REISERFS_CHECK is not set
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_RELAY is not set
+# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_RFKILL is not set
+CONFIG_ROMFS_FS=m
+# CONFIG_ROSE is not set
+CONFIG_RPCSEC_GSS_KRB5=m
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_RTC_CLASS is not set
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_RTC_DRV_MAX6900 is not set
+CONFIG_RT_MUTEXES=y
+# CONFIG_S2IO is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIL24 is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_SC92031 is not set
+CONFIG_SCSI=m
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_ESP_CORE is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_LPFC is not set
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_PAS16 is not set
+CONFIG_SCSI_PROC_FS=y
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+# CONFIG_SCSI_SEAGATE is not set
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_ULTRASTOR is not set
+# CONFIG_SECURITY is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_HDAPS is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_CS=m
+CONFIG_SERIAL_8250_NR_UARTS=2
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_SERIAL_UARTLITE is not set
+# CONFIG_SERIO is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_SHAPER is not set
+CONFIG_SHMEM=y
+CONFIG_SIGNALFD=y
+# CONFIG_SIS190 is not set
+# CONFIG_SIS900 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+CONFIG_SLHC=m
+# CONFIG_SLIP is not set
+# CONFIG_SLOB is not set
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_SND=m
+# CONFIG_SND_AD1816A is not set
+# CONFIG_SND_AD1848 is not set
+# CONFIG_SND_AD1889 is not set
+# CONFIG_SND_ADLIB is not set
+# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_ALS100 is not set
+# CONFIG_SND_ALS300 is not set
+# CONFIG_SND_ALS4000 is not set
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_ATIIXP_MODEM is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AZT2320 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMI8330 is not set
+# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_CS4231 is not set
+# CONFIG_SND_CS4232 is not set
+# CONFIG_SND_CS4236 is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CS5535AUDIO is not set
+# CONFIG_SND_DARLA20 is not set
+# CONFIG_SND_DARLA24 is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_DT019X is not set
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_ECHO3G is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_EMU10K1X is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1688 is not set
+# CONFIG_SND_ES18XX is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_ES968 is not set
+# CONFIG_SND_FM801 is not set
+# CONFIG_SND_GINA20 is not set
+# CONFIG_SND_GINA24 is not set
+# CONFIG_SND_GUSCLASSIC is not set
+# CONFIG_SND_GUSEXTREME is not set
+# CONFIG_SND_GUSMAX is not set
+# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
+CONFIG_SND_HWDEP=m
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+# CONFIG_SND_INDIGO is not set
+# CONFIG_SND_INDIGODJ is not set
+# CONFIG_SND_INDIGOIO is not set
+# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_INTERWAVE is not set
+# CONFIG_SND_INTERWAVE_STB is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LAYLA20 is not set
+# CONFIG_SND_LAYLA24 is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIA is not set
+# CONFIG_SND_MIRO is not set
+# CONFIG_SND_MIXART is not set
+CONFIG_SND_MIXER_OSS=m
+# CONFIG_SND_MONA is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_OPL3SA2 is not set
+# CONFIG_SND_OPTI92X_AD1848 is not set
+# CONFIG_SND_OPTI92X_CS4231 is not set
+# CONFIG_SND_OPTI93X is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_PCM=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_PDAUDIOCF is not set
+CONFIG_SND_RAWMIDI=m
+# CONFIG_SND_RIPTIDE is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_RTCTIMER is not set
+# CONFIG_SND_SB16 is not set
+# CONFIG_SND_SB8 is not set
+# CONFIG_SND_SBAWE is not set
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_SGALAXY is not set
+# CONFIG_SND_SOC is not set
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_SSCAPE is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_TIMER=m
+# CONFIG_SND_TRIDENT is not set
+CONFIG_SND_USB_AUDIO=m
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_USB_USX2Y is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VIA82XX is not set
+# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VX222 is not set
+# CONFIG_SND_VXPOCKET is not set
+# CONFIG_SND_WAVEFRONT is not set
+# CONFIG_SND_YMFPCI is not set
+# CONFIG_SNI_RM is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+CONFIG_SOUND=m
+# CONFIG_SOUND_PRIME is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_SQUASHFS_VMALLOC is not set
+# CONFIG_SSFDC is not set
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_STANDALONE=y
+# CONFIG_STRIP is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_SUNGEM is not set
+CONFIG_SUNRPC=m
+# CONFIG_SUNRPC_BIND34 is not set
+CONFIG_SUNRPC_GSS=m
+# CONFIG_SUN_PARTITION is not set
+CONFIG_SWAP=y
+# CONFIG_SYNCLINK_CS is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_SYSCTL=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_SYSFS=y
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_SYSVIPC=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=m
+CONFIG_TCP_CONG_CUBIC=m
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_VEGAS=y
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_WESTWOOD=m
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_BM=m
+CONFIG_TEXTSEARCH_FSM=m
+CONFIG_TEXTSEARCH_KMP=m
+# CONFIG_TIFM_CORE is not set
+# CONFIG_TIGON3 is not set
+CONFIG_TIMERFD=y
+# CONFIG_TINY_SHMEM is not set
+# CONFIG_TIPC is not set
+# CONFIG_TLAN is not set
+CONFIG_TMD_HERMES=m
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_TR is not set
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_TUN=m
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+# CONFIG_UFS_FS is not set
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_UNIX=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_UNIXWARE_DISKLABEL is not set
+CONFIG_USB=m
+# CONFIG_USB_ACECAD is not set
+CONFIG_USB_ACM=m
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_AIPTEK is not set
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_APPLETOUCH is not set
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARMLINUX=y
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_ATI_REMOTE2 is not set
+CONFIG_USB_ATM=m
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_BANDWIDTH is not set
+CONFIG_USB_BELKIN=y
+# CONFIG_USB_BERRY_CHARGE is not set
+CONFIG_USB_CATC=m
+# CONFIG_USB_CXACRU is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_DABUSB is not set
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EPSON2888 is not set
+CONFIG_USB_EZUSB=y
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_GADGET is not set
+CONFIG_USB_HID=m
+# CONFIG_HID_FF is not set
+# CONFIG_USB_HIDDEV is not set
+CONFIG_USB_HIDINPUT=y
+# CONFIG_USB_HIDINPUT_POWERBOOK is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_KAWETH=m
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_KC2190 is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LIBUSUAL is not set
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_MOUSE is not set
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_CDCETHER=m
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_ZAURUS=m
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+# CONFIG_USB_OHCI_HCD is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_OTG is not set
+CONFIG_USB_PEGASUS=m
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_POWERMATE is not set
+CONFIG_USB_PRINTER=m
+# CONFIG_USB_RIO500 is not set
+CONFIG_USB_RTL8150=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_AIRPRIME=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CP2101=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+# CONFIG_USB_SERIAL_DEBUG is not set
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_FUNSOFT=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_HP4X=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_XIRCOM=m
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_SL811_HCD is not set
+CONFIG_USB_SPEEDTOUCH=m
+CONFIG_USB_STORAGE=m
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_DATAFAB=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_DPCM=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_KARMA=y
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_USBAT=y
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_TOUCHSCREEN is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+CONFIG_USB_UEAGLEATM=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_USBNET_MII=m
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_XUSBATM is not set
+CONFIG_USB_YEALINK=m
+CONFIG_USB_ZD1201=m
+# CONFIG_UTS_NS is not set
+CONFIG_VFAT_FS=m
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_CAFE_CCIC is not set
+# CONFIG_VIDEO_CAPTURE_DRIVERS is not set
+# CONFIG_VIDEO_CS53L32A is not set
+# CONFIG_VIDEO_CX2341X is not set
+# CONFIG_VIDEO_CX25840 is not set
+# CONFIG_VIDEO_CX88 is not set
+CONFIG_VIDEO_DEV=m
+# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
+# CONFIG_VIDEO_HEXIUM_GEMINI is not set
+# CONFIG_VIDEO_HEXIUM_ORION is not set
+# CONFIG_VIDEO_MSP3400 is not set
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_PVRUSB2 is not set
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_VIDEO_SAA711X is not set
+# CONFIG_VIDEO_SAA7127 is not set
+# CONFIG_VIDEO_SAA7134 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_TVP5150 is not set
+# CONFIG_VIDEO_UPD64031A is not set
+# CONFIG_VIDEO_UPD64083 is not set
+# CONFIG_VIDEO_USBVISION is not set
+# CONFIG_VIDEO_V4L1 is not set
+CONFIG_VIDEO_V4L1_COMPAT=y
+CONFIG_VIDEO_V4L2=y
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_WM8739 is not set
+# CONFIG_VIDEO_WM8775 is not set
+# CONFIG_VITESSE_PHY is not set
+CONFIG_VLAN_8021Q=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_VT is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_W1 is not set
+# CONFIG_WAN is not set
+# CONFIG_WAN_ROUTER is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+# CONFIG_WDTPCI is not set
+CONFIG_WIRELESS_EXT=y
+CONFIG_WLAN_80211=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WR_PPMC is not set
+# CONFIG_X25 is not set
+CONFIG_XFRM=y
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+CONFIG_XFRM_USER=m
+CONFIG_XFS_FS=m
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_SECURITY is not set
+# CONFIG_YAFFS_FS is not set
+# CONFIG_YAM is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_YENTA=m
+# CONFIG_YENTA_O2 is not set
+# CONFIG_YENTA_RICOH is not set
+# CONFIG_YENTA_TI is not set
+# CONFIG_YENTA_TOSHIBA is not set
+CONFIG_ZD1211RW=m
+# CONFIG_ZD1211RW_DEBUG is not set
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=m
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZONE_DMA=y
+CONFIG_ZONE_DMA_FLAG=1
diff --git a/target/linux/generic-2.6/patches-2.6.22/001-squashfs.patch b/target/linux/generic-2.6/patches-2.6.22/001-squashfs.patch
new file mode 100644
index 0000000000..38306841d3
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/001-squashfs.patch
@@ -0,0 +1,4168 @@
+diff -urN linux-2.6.21.1.old/fs/Kconfig linux-2.6.21.1.dev/fs/Kconfig
+--- linux-2.6.21.1.old/fs/Kconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/fs/Kconfig	2007-05-26 19:00:37.121351760 +0200
+@@ -1367,6 +1367,71 @@
+ 
+ 	  If unsure, say N.
+ 
++config SQUASHFS
++	tristate "SquashFS 3.0 - Squashed file system support"
++	select ZLIB_INFLATE
++	help
++	  Saying Y here includes support for SquashFS 3.0 (a Compressed Read-Only File
++	  System).  Squashfs is a highly compressed read-only filesystem for Linux.
++	  It uses zlib compression to compress both files, inodes and directories.
++	  Inodes in the system are very small and all blocks are packed to minimise
++	  data overhead. Block sizes greater than 4K are supported up to a maximum of 64K.
++	  SquashFS 3.0 supports 64 bit filesystems and files (larger than 4GB), full
++	  uid/gid information, hard links and timestamps.
++
++	  Squashfs is intended for general read-only filesystem use, for archival
++	  use (i.e. in cases where a .tar.gz file may be used), and in embedded
++	  systems where low overhead is needed.  Further information and filesystem tools
++	  are available from http://squashfs.sourceforge.net.
++
++	  If you want to compile this as a module ( = code which can be
++	  inserted in and removed from the running kernel whenever you want),
++	  say M here and read <file:Documentation/modules.txt>.  The module
++	  will be called squashfs.  Note that the root file system (the one
++	  containing the directory /) cannot be compiled as a module.
++
++	  If unsure, say N.
++
++config SQUASHFS_EMBEDDED
++
++	bool "Additional options for memory-constrained systems"
++	depends on SQUASHFS
++	default n
++	help
++	  Saying Y here allows you to specify cache sizes and how Squashfs
++	  allocates memory.  This is only intended for memory constrained
++	  systems.
++
++	  If unsure, say N.
++
++config SQUASHFS_FRAGMENT_CACHE_SIZE
++	int "Number of fragments cached" if SQUASHFS_EMBEDDED
++	depends on SQUASHFS
++	default "3"
++	help
++	  By default SquashFS caches the last 3 fragments read from
++	  the filesystem.  Increasing this amount may mean SquashFS
++	  has to re-read fragments less often from disk, at the expense
++	  of extra system memory.  Decreasing this amount will mean
++	  SquashFS uses less memory at the expense of extra reads from disk.
++
++	  Note there must be at least one cached fragment.  Anything
++	  much more than three will probably not make much difference.
++
++config SQUASHFS_VMALLOC
++	bool "Use Vmalloc rather than Kmalloc" if SQUASHFS_EMBEDDED
++	depends on SQUASHFS
++	default n
++	help
++	  By default SquashFS uses kmalloc to obtain fragment cache memory.
++	  Kmalloc memory is the standard kernel allocator, but it can fail
++	  on memory constrained systems.  Because of the way Vmalloc works,
++	  Vmalloc can succeed when kmalloc fails.  Specifying this option
++	  will make SquashFS always use Vmalloc to allocate the
++	  fragment cache memory.
++
++	  If unsure, say N.
++
+ config VXFS_FS
+ 	tristate "FreeVxFS file system support (VERITAS VxFS(TM) compatible)"
+ 	depends on BLOCK
+diff -urN linux-2.6.21.1.old/fs/Makefile linux-2.6.21.1.dev/fs/Makefile
+--- linux-2.6.21.1.old/fs/Makefile	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/fs/Makefile	2007-05-26 19:00:37.121351760 +0200
+@@ -72,6 +72,7 @@
+ obj-$(CONFIG_JBD2)		+= jbd2/
+ obj-$(CONFIG_EXT2_FS)		+= ext2/
+ obj-$(CONFIG_CRAMFS)		+= cramfs/
++obj-$(CONFIG_SQUASHFS)		+= squashfs/
+ obj-$(CONFIG_RAMFS)		+= ramfs/
+ obj-$(CONFIG_HUGETLBFS)		+= hugetlbfs/
+ obj-$(CONFIG_CODA_FS)		+= coda/
+diff -urN linux-2.6.21.1.old/fs/squashfs/inode.c linux-2.6.21.1.dev/fs/squashfs/inode.c
+--- linux-2.6.21.1.old/fs/squashfs/inode.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/squashfs/inode.c	2007-05-26 19:00:37.123351456 +0200
+@@ -0,0 +1,2122 @@
++/*
++ * Squashfs - a compressed read only filesystem for Linux
++ *
++ * Copyright (c) 2002, 2003, 2004, 2005, 2006
++ * Phillip Lougher <phillip@lougher.org.uk>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2,
++ * or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * inode.c
++ */
++
++#include <linux/types.h>
++#include <linux/squashfs_fs.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/slab.h>
++#include <linux/fs.h>
++#include <linux/smp_lock.h>
++#include <linux/slab.h>
++#include <linux/squashfs_fs_sb.h>
++#include <linux/squashfs_fs_i.h>
++#include <linux/buffer_head.h>
++#include <linux/vfs.h>
++#include <linux/init.h>
++#include <linux/dcache.h>
++#include <linux/wait.h>
++#include <linux/zlib.h>
++#include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <asm/uaccess.h>
++#include <asm/semaphore.h>
++
++#include "squashfs.h"
++
++static void squashfs_put_super(struct super_block *);
++static int squashfs_statfs(struct dentry *, struct kstatfs *);
++static int squashfs_symlink_readpage(struct file *file, struct page *page);
++static int squashfs_readpage(struct file *file, struct page *page);
++static int squashfs_readpage4K(struct file *file, struct page *page);
++static int squashfs_readdir(struct file *, void *, filldir_t);
++static struct inode *squashfs_alloc_inode(struct super_block *sb);
++static void squashfs_destroy_inode(struct inode *inode);
++static int init_inodecache(void);
++static void destroy_inodecache(void);
++static struct dentry *squashfs_lookup(struct inode *, struct dentry *,
++				struct nameidata *);
++static struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode);
++static long long read_blocklist(struct inode *inode, int index,
++				int readahead_blks, char *block_list,
++				unsigned short **block_p, unsigned int *bsize);
++static int squashfs_get_sb(struct file_system_type *, int,
++			const char *, void *, struct vfsmount *);
++
++
++static z_stream stream;
++
++static struct file_system_type squashfs_fs_type = {
++	.owner = THIS_MODULE,
++	.name = "squashfs",
++	.get_sb = squashfs_get_sb,
++	.kill_sb = kill_block_super,
++	.fs_flags = FS_REQUIRES_DEV
++};
++
++static unsigned char squashfs_filetype_table[] = {
++	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
++};
++
++static struct super_operations squashfs_ops = {
++	.alloc_inode = squashfs_alloc_inode,
++	.destroy_inode = squashfs_destroy_inode,
++	.statfs = squashfs_statfs,
++	.put_super = squashfs_put_super,
++};
++
++SQSH_EXTERN struct address_space_operations squashfs_symlink_aops = {
++	.readpage = squashfs_symlink_readpage
++};
++
++SQSH_EXTERN struct address_space_operations squashfs_aops = {
++	.readpage = squashfs_readpage
++};
++
++SQSH_EXTERN struct address_space_operations squashfs_aops_4K = {
++	.readpage = squashfs_readpage4K
++};
++
++static struct file_operations squashfs_dir_ops = {
++	.read = generic_read_dir,
++	.readdir = squashfs_readdir
++};
++
++SQSH_EXTERN struct inode_operations squashfs_dir_inode_ops = {
++	.lookup = squashfs_lookup
++};
++
++
++static struct buffer_head *get_block_length(struct super_block *s,
++				int *cur_index, int *offset, int *c_byte)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	unsigned short temp;
++	struct buffer_head *bh;
++
++	if (!(bh = sb_bread(s, *cur_index)))
++		goto out;
++
++	if (msblk->devblksize - *offset == 1) {
++		if (msblk->swap)
++			((unsigned char *) &temp)[1] = *((unsigned char *)
++				(bh->b_data + *offset));
++		else
++			((unsigned char *) &temp)[0] = *((unsigned char *)
++				(bh->b_data + *offset));
++		brelse(bh);
++		if (!(bh = sb_bread(s, ++(*cur_index))))
++			goto out;
++		if (msblk->swap)
++			((unsigned char *) &temp)[0] = *((unsigned char *)
++				bh->b_data);
++		else
++			((unsigned char *) &temp)[1] = *((unsigned char *)
++				bh->b_data);
++		*c_byte = temp;
++		*offset = 1;
++	} else {
++		if (msblk->swap) {
++			((unsigned char *) &temp)[1] = *((unsigned char *)
++				(bh->b_data + *offset));
++			((unsigned char *) &temp)[0] = *((unsigned char *)
++				(bh->b_data + *offset + 1));
++		} else {
++			((unsigned char *) &temp)[0] = *((unsigned char *)
++				(bh->b_data + *offset));
++			((unsigned char *) &temp)[1] = *((unsigned char *)
++				(bh->b_data + *offset + 1));
++		}
++		*c_byte = temp;
++		*offset += 2;
++	}
++
++	if (SQUASHFS_CHECK_DATA(msblk->sblk.flags)) {
++		if (*offset == msblk->devblksize) {
++			brelse(bh);
++			if (!(bh = sb_bread(s, ++(*cur_index))))
++				goto out;
++			*offset = 0;
++		}
++		if (*((unsigned char *) (bh->b_data + *offset)) !=
++						SQUASHFS_MARKER_BYTE) {
++			ERROR("Metadata block marker corrupt @ %x\n",
++						*cur_index);
++			brelse(bh);
++			goto out;
++		}
++		(*offset)++;
++	}
++	return bh;
++
++out:
++	return NULL;
++}
++
++
++SQSH_EXTERN unsigned int squashfs_read_data(struct super_block *s, char *buffer,
++			long long index, unsigned int length,
++			long long *next_index)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct buffer_head *bh[((SQUASHFS_FILE_MAX_SIZE - 1) >>
++			msblk->devblksize_log2) + 2];
++	unsigned int offset = index & ((1 << msblk->devblksize_log2) - 1);
++	unsigned int cur_index = index >> msblk->devblksize_log2;
++	int bytes, avail_bytes, b = 0, k;
++	char *c_buffer;
++	unsigned int compressed;
++	unsigned int c_byte = length;
++
++	if (c_byte) {
++		bytes = msblk->devblksize - offset;
++		compressed = SQUASHFS_COMPRESSED_BLOCK(c_byte);
++		c_buffer = compressed ? msblk->read_data : buffer;
++		c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);
++
++		TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
++					? "" : "un", (unsigned int) c_byte);
++
++		if (!(bh[0] = sb_getblk(s, cur_index)))
++			goto block_release;
++
++		for (b = 1; bytes < c_byte; b++) {
++			if (!(bh[b] = sb_getblk(s, ++cur_index)))
++				goto block_release;
++			bytes += msblk->devblksize;
++		}
++		ll_rw_block(READ, b, bh);
++	} else {
++		if (!(bh[0] = get_block_length(s, &cur_index, &offset,
++								&c_byte)))
++			goto read_failure;
++
++		bytes = msblk->devblksize - offset;
++		compressed = SQUASHFS_COMPRESSED(c_byte);
++		c_buffer = compressed ? msblk->read_data : buffer;
++		c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
++
++		TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
++					? "" : "un", (unsigned int) c_byte);
++
++		for (b = 1; bytes < c_byte; b++) {
++			if (!(bh[b] = sb_getblk(s, ++cur_index)))
++				goto block_release;
++			bytes += msblk->devblksize;
++		}
++		ll_rw_block(READ, b - 1, bh + 1);
++	}
++
++	if (compressed)
++		down(&msblk->read_data_mutex);
++
++	for (bytes = 0, k = 0; k < b; k++) {
++		avail_bytes = (c_byte - bytes) > (msblk->devblksize - offset) ?
++					msblk->devblksize - offset :
++					c_byte - bytes;
++		wait_on_buffer(bh[k]);
++		if (!buffer_uptodate(bh[k]))
++			goto block_release;
++		memcpy(c_buffer + bytes, bh[k]->b_data + offset, avail_bytes);
++		bytes += avail_bytes;
++		offset = 0;
++		brelse(bh[k]);
++	}
++
++	/*
++	 * uncompress block
++	 */
++	if (compressed) {
++		int zlib_err;
++
++		stream.next_in = c_buffer;
++		stream.avail_in = c_byte;
++		stream.next_out = buffer;
++		stream.avail_out = msblk->read_size;
++
++		if (((zlib_err = zlib_inflateInit(&stream)) != Z_OK) ||
++				((zlib_err = zlib_inflate(&stream, Z_FINISH))
++				 != Z_STREAM_END) || ((zlib_err =
++				zlib_inflateEnd(&stream)) != Z_OK)) {
++			ERROR("zlib_fs returned unexpected result 0x%x\n",
++				zlib_err);
++			bytes = 0;
++		} else
++			bytes = stream.total_out;
++
++		up(&msblk->read_data_mutex);
++	}
++
++	if (next_index)
++		*next_index = index + c_byte + (length ? 0 :
++				(SQUASHFS_CHECK_DATA(msblk->sblk.flags)
++				 ? 3 : 2));
++	return bytes;
++
++block_release:
++	while (--b >= 0)
++		brelse(bh[b]);
++
++read_failure:
++	ERROR("sb_bread failed reading block 0x%x\n", cur_index);
++	return 0;
++}
++
++
++SQSH_EXTERN int squashfs_get_cached_block(struct super_block *s, char *buffer,
++				long long block, unsigned int offset,
++				int length, long long *next_block,
++				unsigned int *next_offset)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	int n, i, bytes, return_length = length;
++	long long next_index;
++
++	TRACE("Entered squashfs_get_cached_block [%llx:%x]\n", block, offset);
++
++	while ( 1 ) {
++		for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
++			if (msblk->block_cache[i].block == block)
++				break;
++
++		down(&msblk->block_cache_mutex);
++
++		if (i == SQUASHFS_CACHED_BLKS) {
++			/* read inode header block */
++			for (i = msblk->next_cache, n = SQUASHFS_CACHED_BLKS;
++					n ; n --, i = (i + 1) %
++					SQUASHFS_CACHED_BLKS)
++				if (msblk->block_cache[i].block !=
++							SQUASHFS_USED_BLK)
++					break;
++
++			if (n == 0) {
++				wait_queue_t wait;
++
++				init_waitqueue_entry(&wait, current);
++				add_wait_queue(&msblk->waitq, &wait);
++				set_current_state(TASK_UNINTERRUPTIBLE);
++ 				up(&msblk->block_cache_mutex);
++				schedule();
++				set_current_state(TASK_RUNNING);
++				remove_wait_queue(&msblk->waitq, &wait);
++				continue;
++			}
++			msblk->next_cache = (i + 1) % SQUASHFS_CACHED_BLKS;
++
++			if (msblk->block_cache[i].block ==
++							SQUASHFS_INVALID_BLK) {
++				if (!(msblk->block_cache[i].data =
++						kmalloc(SQUASHFS_METADATA_SIZE,
++						GFP_KERNEL))) {
++					ERROR("Failed to allocate cache"
++							"block\n");
++					up(&msblk->block_cache_mutex);
++					goto out;
++				}
++			}
++
++			msblk->block_cache[i].block = SQUASHFS_USED_BLK;
++			up(&msblk->block_cache_mutex);
++
++			if (!(msblk->block_cache[i].length =
++						squashfs_read_data(s,
++						msblk->block_cache[i].data,
++						block, 0, &next_index))) {
++				ERROR("Unable to read cache block [%llx:%x]\n",
++						block, offset);
++				goto out;
++			}
++
++			down(&msblk->block_cache_mutex);
++			wake_up(&msblk->waitq);
++			msblk->block_cache[i].block = block;
++			msblk->block_cache[i].next_index = next_index;
++			TRACE("Read cache block [%llx:%x]\n", block, offset);
++		}
++
++		if (msblk->block_cache[i].block != block) {
++			up(&msblk->block_cache_mutex);
++			continue;
++		}
++
++		if ((bytes = msblk->block_cache[i].length - offset) >= length) {
++			if (buffer)
++				memcpy(buffer, msblk->block_cache[i].data +
++						offset, length);
++			if (msblk->block_cache[i].length - offset == length) {
++				*next_block = msblk->block_cache[i].next_index;
++				*next_offset = 0;
++			} else {
++				*next_block = block;
++				*next_offset = offset + length;
++			}
++			up(&msblk->block_cache_mutex);
++			goto finish;
++		} else {
++			if (buffer) {
++				memcpy(buffer, msblk->block_cache[i].data +
++						offset, bytes);
++				buffer += bytes;
++			}
++			block = msblk->block_cache[i].next_index;
++			up(&msblk->block_cache_mutex);
++			length -= bytes;
++			offset = 0;
++		}
++	}
++
++finish:
++	return return_length;
++out:
++	return 0;
++}
++
++
++static int get_fragment_location(struct super_block *s, unsigned int fragment,
++				long long *fragment_start_block,
++				unsigned int *fragment_size)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	long long start_block =
++		msblk->fragment_index[SQUASHFS_FRAGMENT_INDEX(fragment)];
++	int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
++	struct squashfs_fragment_entry fragment_entry;
++
++	if (msblk->swap) {
++		struct squashfs_fragment_entry sfragment_entry;
++
++		if (!squashfs_get_cached_block(s, (char *) &sfragment_entry,
++					start_block, offset,
++					sizeof(sfragment_entry), &start_block,
++					&offset))
++			goto out;
++		SQUASHFS_SWAP_FRAGMENT_ENTRY(&fragment_entry, &sfragment_entry);
++	} else
++		if (!squashfs_get_cached_block(s, (char *) &fragment_entry,
++					start_block, offset,
++					sizeof(fragment_entry), &start_block,
++					&offset))
++			goto out;
++
++	*fragment_start_block = fragment_entry.start_block;
++	*fragment_size = fragment_entry.size;
++
++	return 1;
++
++out:
++	return 0;
++}
++
++
++SQSH_EXTERN void release_cached_fragment(struct squashfs_sb_info *msblk, struct
++					squashfs_fragment_cache *fragment)
++{
++	down(&msblk->fragment_mutex);
++	fragment->locked --;
++	wake_up(&msblk->fragment_wait_queue);
++	up(&msblk->fragment_mutex);
++}
++
++
++SQSH_EXTERN struct squashfs_fragment_cache *get_cached_fragment(struct super_block
++					*s, long long start_block,
++					int length)
++{
++	int i, n;
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++
++	while ( 1 ) {
++		down(&msblk->fragment_mutex);
++
++		for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS &&
++				msblk->fragment[i].block != start_block; i++);
++
++		if (i == SQUASHFS_CACHED_FRAGMENTS) {
++			for (i = msblk->next_fragment, n =
++				SQUASHFS_CACHED_FRAGMENTS; n &&
++				msblk->fragment[i].locked; n--, i = (i + 1) %
++				SQUASHFS_CACHED_FRAGMENTS);
++
++			if (n == 0) {
++				wait_queue_t wait;
++
++				init_waitqueue_entry(&wait, current);
++				add_wait_queue(&msblk->fragment_wait_queue,
++									&wait);
++				set_current_state(TASK_UNINTERRUPTIBLE);
++				up(&msblk->fragment_mutex);
++				schedule();
++				set_current_state(TASK_RUNNING);
++				remove_wait_queue(&msblk->fragment_wait_queue,
++									&wait);
++				continue;
++			}
++			msblk->next_fragment = (msblk->next_fragment + 1) %
++				SQUASHFS_CACHED_FRAGMENTS;
++
++			if (msblk->fragment[i].data == NULL)
++				if (!(msblk->fragment[i].data = SQUASHFS_ALLOC
++						(SQUASHFS_FILE_MAX_SIZE))) {
++					ERROR("Failed to allocate fragment "
++							"cache block\n");
++					up(&msblk->fragment_mutex);
++					goto out;
++				}
++
++			msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
++			msblk->fragment[i].locked = 1;
++			up(&msblk->fragment_mutex);
++
++			if (!(msblk->fragment[i].length = squashfs_read_data(s,
++						msblk->fragment[i].data,
++						start_block, length, NULL))) {
++				ERROR("Unable to read fragment cache block "
++							"[%llx]\n", start_block);
++				msblk->fragment[i].locked = 0;
++				goto out;
++			}
++
++			msblk->fragment[i].block = start_block;
++			TRACE("New fragment %d, start block %lld, locked %d\n",
++						i, msblk->fragment[i].block,
++						msblk->fragment[i].locked);
++			break;
++		}
++
++		msblk->fragment[i].locked++;
++		up(&msblk->fragment_mutex);
++		TRACE("Got fragment %d, start block %lld, locked %d\n", i,
++						msblk->fragment[i].block,
++						msblk->fragment[i].locked);
++		break;
++	}
++
++	return &msblk->fragment[i];
++
++out:
++	return NULL;
++}
++
++
++static struct inode *squashfs_new_inode(struct super_block *s,
++		struct squashfs_base_inode_header *inodeb)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct inode *i = new_inode(s);
++
++	if (i) {
++		i->i_ino = inodeb->inode_number;
++		i->i_mtime.tv_sec = inodeb->mtime;
++		i->i_atime.tv_sec = inodeb->mtime;
++		i->i_ctime.tv_sec = inodeb->mtime;
++		i->i_uid = msblk->uid[inodeb->uid];
++		i->i_mode = inodeb->mode;
++		i->i_size = 0;
++		if (inodeb->guid == SQUASHFS_GUIDS)
++			i->i_gid = i->i_uid;
++		else
++			i->i_gid = msblk->guid[inodeb->guid];
++	}
++
++	return i;
++}
++
++
++static struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode)
++{
++	struct inode *i;
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	long long block = SQUASHFS_INODE_BLK(inode) +
++		sblk->inode_table_start;
++	unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
++	long long next_block;
++	unsigned int next_offset;
++	union squashfs_inode_header id, sid;
++	struct squashfs_base_inode_header *inodeb = &id.base,
++					  *sinodeb = &sid.base;
++
++	TRACE("Entered squashfs_iget\n");
++
++	if (msblk->swap) {
++		if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
++					offset, sizeof(*sinodeb), &next_block,
++					&next_offset))
++			goto failed_read;
++		SQUASHFS_SWAP_BASE_INODE_HEADER(inodeb, sinodeb,
++					sizeof(*sinodeb));
++	} else
++		if (!squashfs_get_cached_block(s, (char *) inodeb, block,
++					offset, sizeof(*inodeb), &next_block,
++					&next_offset))
++			goto failed_read;
++
++	switch(inodeb->inode_type) {
++		case SQUASHFS_FILE_TYPE: {
++			unsigned int frag_size;
++			long long frag_blk;
++			struct squashfs_reg_inode_header *inodep = &id.reg;
++			struct squashfs_reg_inode_header *sinodep = &sid.reg;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_REG_INODE_HEADER(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			frag_blk = SQUASHFS_INVALID_BLK;
++			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
++					!get_fragment_location(s,
++					inodep->fragment, &frag_blk, &frag_size))
++				goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb)) == NULL)
++				goto failed_read1;
++
++			i->i_nlink = 1;
++			i->i_size = inodep->file_size;
++			i->i_fop = &generic_ro_fops;
++			i->i_mode |= S_IFREG;
++			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
++			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
++			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
++			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
++			SQUASHFS_I(i)->start_block = inodep->start_block;
++			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
++			SQUASHFS_I(i)->offset = next_offset;
++			if (sblk->block_size > 4096)
++				i->i_data.a_ops = &squashfs_aops;
++			else
++				i->i_data.a_ops = &squashfs_aops_4K;
++
++			TRACE("File inode %x:%x, start_block %llx, "
++					"block_list_start %llx, offset %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					inodep->start_block, next_block,
++					next_offset);
++			break;
++		}
++		case SQUASHFS_LREG_TYPE: {
++			unsigned int frag_size;
++			long long frag_blk;
++			struct squashfs_lreg_inode_header *inodep = &id.lreg;
++			struct squashfs_lreg_inode_header *sinodep = &sid.lreg;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_LREG_INODE_HEADER(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			frag_blk = SQUASHFS_INVALID_BLK;
++			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
++					!get_fragment_location(s,
++					inodep->fragment, &frag_blk, &frag_size))
++				goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb)) == NULL)
++				goto failed_read1;
++
++			i->i_nlink = inodep->nlink;
++			i->i_size = inodep->file_size;
++			i->i_fop = &generic_ro_fops;
++			i->i_mode |= S_IFREG;
++			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
++			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
++			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
++			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
++			SQUASHFS_I(i)->start_block = inodep->start_block;
++			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
++			SQUASHFS_I(i)->offset = next_offset;
++			if (sblk->block_size > 4096)
++				i->i_data.a_ops = &squashfs_aops;
++			else
++				i->i_data.a_ops = &squashfs_aops_4K;
++
++			TRACE("File inode %x:%x, start_block %llx, "
++					"block_list_start %llx, offset %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					inodep->start_block, next_block,
++					next_offset);
++			break;
++		}
++		case SQUASHFS_DIR_TYPE: {
++			struct squashfs_dir_inode_header *inodep = &id.dir;
++			struct squashfs_dir_inode_header *sinodep = &sid.dir;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_DIR_INODE_HEADER(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb)) == NULL)
++				goto failed_read1;
++
++			i->i_nlink = inodep->nlink;
++			i->i_size = inodep->file_size;
++			i->i_op = &squashfs_dir_inode_ops;
++			i->i_fop = &squashfs_dir_ops;
++			i->i_mode |= S_IFDIR;
++			SQUASHFS_I(i)->start_block = inodep->start_block;
++			SQUASHFS_I(i)->offset = inodep->offset;
++			SQUASHFS_I(i)->u.s2.directory_index_count = 0;
++			SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
++
++			TRACE("Directory inode %x:%x, start_block %x, offset "
++					"%x\n", SQUASHFS_INODE_BLK(inode),
++					offset, inodep->start_block,
++					inodep->offset);
++			break;
++		}
++		case SQUASHFS_LDIR_TYPE: {
++			struct squashfs_ldir_inode_header *inodep = &id.ldir;
++			struct squashfs_ldir_inode_header *sinodep = &sid.ldir;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_LDIR_INODE_HEADER(inodep,
++						sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb)) == NULL)
++				goto failed_read1;
++
++			i->i_nlink = inodep->nlink;
++			i->i_size = inodep->file_size;
++			i->i_op = &squashfs_dir_inode_ops;
++			i->i_fop = &squashfs_dir_ops;
++			i->i_mode |= S_IFDIR;
++			SQUASHFS_I(i)->start_block = inodep->start_block;
++			SQUASHFS_I(i)->offset = inodep->offset;
++			SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
++			SQUASHFS_I(i)->u.s2.directory_index_offset =
++								next_offset;
++			SQUASHFS_I(i)->u.s2.directory_index_count =
++								inodep->i_count;
++			SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
++
++			TRACE("Long directory inode %x:%x, start_block %x, "
++					"offset %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					inodep->start_block, inodep->offset);
++			break;
++		}
++		case SQUASHFS_SYMLINK_TYPE: {
++			struct squashfs_symlink_inode_header *inodep =
++								&id.symlink;
++			struct squashfs_symlink_inode_header *sinodep =
++								&sid.symlink;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_SYMLINK_INODE_HEADER(inodep,
++								sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb)) == NULL)
++				goto failed_read1;
++
++			i->i_nlink = inodep->nlink;
++			i->i_size = inodep->symlink_size;
++			i->i_op = &page_symlink_inode_operations;
++			i->i_data.a_ops = &squashfs_symlink_aops;
++			i->i_mode |= S_IFLNK;
++			SQUASHFS_I(i)->start_block = next_block;
++			SQUASHFS_I(i)->offset = next_offset;
++
++			TRACE("Symbolic link inode %x:%x, start_block %llx, "
++					"offset %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					next_block, next_offset);
++			break;
++		 }
++		 case SQUASHFS_BLKDEV_TYPE:
++		 case SQUASHFS_CHRDEV_TYPE: {
++			struct squashfs_dev_inode_header *inodep = &id.dev;
++			struct squashfs_dev_inode_header *sinodep = &sid.dev;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_DEV_INODE_HEADER(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if ((i = squashfs_new_inode(s, inodeb)) == NULL)
++				goto failed_read1;
++
++			i->i_nlink = inodep->nlink;
++			i->i_mode |= (inodeb->inode_type ==
++					SQUASHFS_CHRDEV_TYPE) ?  S_IFCHR :
++					S_IFBLK;
++			init_special_inode(i, i->i_mode,
++					old_decode_dev(inodep->rdev));
++
++			TRACE("Device inode %x:%x, rdev %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					inodep->rdev);
++			break;
++		 }
++		 case SQUASHFS_FIFO_TYPE:
++		 case SQUASHFS_SOCKET_TYPE: {
++			struct squashfs_ipc_inode_header *inodep = &id.ipc;
++			struct squashfs_ipc_inode_header *sinodep = &sid.ipc;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_IPC_INODE_HEADER(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if ((i = squashfs_new_inode(s, inodeb)) == NULL)
++				goto failed_read1;
++
++			i->i_nlink = inodep->nlink;
++			i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
++							? S_IFIFO : S_IFSOCK;
++			init_special_inode(i, i->i_mode, 0);
++			break;
++		 }
++		 default:
++			ERROR("Unknown inode type %d in squashfs_iget!\n",
++					inodeb->inode_type);
++			goto failed_read1;
++	}
++
++	insert_inode_hash(i);
++	return i;
++
++failed_read:
++	ERROR("Unable to read inode [%llx:%x]\n", block, offset);
++
++failed_read1:
++	return NULL;
++}
++
++
++static int read_fragment_index_table(struct super_block *s)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++
++	/* Allocate fragment index table */
++	if (!(msblk->fragment_index = kmalloc(SQUASHFS_FRAGMENT_INDEX_BYTES
++					(sblk->fragments), GFP_KERNEL))) {
++		ERROR("Failed to allocate uid/gid table\n");
++		return 0;
++	}
++
++	if (SQUASHFS_FRAGMENT_INDEX_BYTES(sblk->fragments) &&
++					!squashfs_read_data(s, (char *)
++					msblk->fragment_index,
++					sblk->fragment_table_start,
++					SQUASHFS_FRAGMENT_INDEX_BYTES
++					(sblk->fragments) |
++					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
++		ERROR("unable to read fragment index table\n");
++		return 0;
++	}
++
++	if (msblk->swap) {
++		int i;
++		long long fragment;
++
++		for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES(sblk->fragments);
++									i++) {
++			SQUASHFS_SWAP_FRAGMENT_INDEXES((&fragment),
++						&msblk->fragment_index[i], 1);
++			msblk->fragment_index[i] = fragment;
++		}
++	}
++
++	return 1;
++}
++
++
++static int supported_squashfs_filesystem(struct squashfs_sb_info *msblk, int silent)
++{
++	struct squashfs_super_block *sblk = &msblk->sblk;
++
++	msblk->iget = squashfs_iget;
++	msblk->read_blocklist = read_blocklist;
++	msblk->read_fragment_index_table = read_fragment_index_table;
++
++	if (sblk->s_major == 1) {
++		if (!squashfs_1_0_supported(msblk)) {
++			SERROR("Major/Minor mismatch, Squashfs 1.0 filesystems "
++				"are unsupported\n");
++			SERROR("Please recompile with "
++				"Squashfs 1.0 support enabled\n");
++			return 0;
++		}
++	} else if (sblk->s_major == 2) {
++		if (!squashfs_2_0_supported(msblk)) {
++			SERROR("Major/Minor mismatch, Squashfs 2.0 filesystems "
++				"are unsupported\n");
++			SERROR("Please recompile with "
++				"Squashfs 2.0 support enabled\n");
++			return 0;
++		}
++	} else if(sblk->s_major != SQUASHFS_MAJOR || sblk->s_minor >
++			SQUASHFS_MINOR) {
++		SERROR("Major/Minor mismatch, trying to mount newer %d.%d "
++				"filesystem\n", sblk->s_major, sblk->s_minor);
++		SERROR("Please update your kernel\n");
++		return 0;
++	}
++
++	return 1;
++}
++
++
++static int squashfs_fill_super(struct super_block *s, void *data, int silent)
++{
++	struct squashfs_sb_info *msblk;
++	struct squashfs_super_block *sblk;
++	int i;
++	char b[BDEVNAME_SIZE];
++	struct inode *root;
++
++	TRACE("Entered squashfs_read_superblock\n");
++
++	if (!(s->s_fs_info = kmalloc(sizeof(struct squashfs_sb_info),
++						GFP_KERNEL))) {
++		ERROR("Failed to allocate superblock\n");
++		goto failure;
++	}
++	memset(s->s_fs_info, 0, sizeof(struct squashfs_sb_info));
++	msblk = s->s_fs_info;
++	sblk = &msblk->sblk;
++
++	msblk->devblksize = sb_min_blocksize(s, BLOCK_SIZE);
++	msblk->devblksize_log2 = ffz(~msblk->devblksize);
++
++	init_MUTEX(&msblk->read_data_mutex);
++	init_MUTEX(&msblk->read_page_mutex);
++	init_MUTEX(&msblk->block_cache_mutex);
++	init_MUTEX(&msblk->fragment_mutex);
++	init_MUTEX(&msblk->meta_index_mutex);
++
++	init_waitqueue_head(&msblk->waitq);
++	init_waitqueue_head(&msblk->fragment_wait_queue);
++
++	if (!squashfs_read_data(s, (char *) sblk, SQUASHFS_START,
++					sizeof(struct squashfs_super_block) |
++					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
++		SERROR("unable to read superblock\n");
++		goto failed_mount;
++	}
++
++	/* Check it is a SQUASHFS superblock */
++	msblk->swap = 0;
++	if ((s->s_magic = sblk->s_magic) != SQUASHFS_MAGIC) {
++		if (sblk->s_magic == SQUASHFS_MAGIC_SWAP) {
++			struct squashfs_super_block ssblk;
++
++			WARNING("Mounting a different endian SQUASHFS "
++				"filesystem on %s\n", bdevname(s->s_bdev, b));
++
++			SQUASHFS_SWAP_SUPER_BLOCK(&ssblk, sblk);
++			memcpy(sblk, &ssblk, sizeof(struct squashfs_super_block));
++			msblk->swap = 1;
++		} else  {
++			SERROR("Can't find a SQUASHFS superblock on %s\n",
++							bdevname(s->s_bdev, b));
++			goto failed_mount;
++		}
++	}
++
++	/* Check the MAJOR & MINOR versions */
++	if(!supported_squashfs_filesystem(msblk, silent))
++		goto failed_mount;
++
++	TRACE("Found valid superblock on %s\n", bdevname(s->s_bdev, b));
++	TRACE("Inodes are %scompressed\n",
++					SQUASHFS_UNCOMPRESSED_INODES
++					(sblk->flags) ? "un" : "");
++	TRACE("Data is %scompressed\n",
++					SQUASHFS_UNCOMPRESSED_DATA(sblk->flags)
++					? "un" : "");
++	TRACE("Check data is %s present in the filesystem\n",
++					SQUASHFS_CHECK_DATA(sblk->flags) ?
++					"" : "not");
++	TRACE("Filesystem size %lld bytes\n", sblk->bytes_used);
++	TRACE("Block size %d\n", sblk->block_size);
++	TRACE("Number of inodes %d\n", sblk->inodes);
++	if (sblk->s_major > 1)
++		TRACE("Number of fragments %d\n", sblk->fragments);
++	TRACE("Number of uids %d\n", sblk->no_uids);
++	TRACE("Number of gids %d\n", sblk->no_guids);
++	TRACE("sblk->inode_table_start %llx\n", sblk->inode_table_start);
++	TRACE("sblk->directory_table_start %llx\n", sblk->directory_table_start);
++	if (sblk->s_major > 1)
++		TRACE("sblk->fragment_table_start %llx\n",
++					sblk->fragment_table_start);
++	TRACE("sblk->uid_start %llx\n", sblk->uid_start);
++
++	s->s_flags |= MS_RDONLY;
++	s->s_op = &squashfs_ops;
++
++	/* Init inode_table block pointer array */
++	if (!(msblk->block_cache = kmalloc(sizeof(struct squashfs_cache) *
++					SQUASHFS_CACHED_BLKS, GFP_KERNEL))) {
++		ERROR("Failed to allocate block cache\n");
++		goto failed_mount;
++	}
++
++	for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
++		msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
++
++	msblk->next_cache = 0;
++
++	/* Allocate read_data block */
++	msblk->read_size = (sblk->block_size < SQUASHFS_METADATA_SIZE) ?
++					SQUASHFS_METADATA_SIZE :
++					sblk->block_size;
++
++	if (!(msblk->read_data = kmalloc(msblk->read_size, GFP_KERNEL))) {
++		ERROR("Failed to allocate read_data block\n");
++		goto failed_mount;
++	}
++
++	/* Allocate read_page block */
++	if (!(msblk->read_page = kmalloc(sblk->block_size, GFP_KERNEL))) {
++		ERROR("Failed to allocate read_page block\n");
++		goto failed_mount;
++	}
++
++	/* Allocate uid and gid tables */
++	if (!(msblk->uid = kmalloc((sblk->no_uids + sblk->no_guids) *
++					sizeof(unsigned int), GFP_KERNEL))) {
++		ERROR("Failed to allocate uid/gid table\n");
++		goto failed_mount;
++	}
++	msblk->guid = msblk->uid + sblk->no_uids;
++
++	if (msblk->swap) {
++		unsigned int suid[sblk->no_uids + sblk->no_guids];
++
++		if (!squashfs_read_data(s, (char *) &suid, sblk->uid_start,
++					((sblk->no_uids + sblk->no_guids) *
++					 sizeof(unsigned int)) |
++					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
++			ERROR("unable to read uid/gid table\n");
++			goto failed_mount;
++		}
++
++		SQUASHFS_SWAP_DATA(msblk->uid, suid, (sblk->no_uids +
++			sblk->no_guids), (sizeof(unsigned int) * 8));
++	} else
++		if (!squashfs_read_data(s, (char *) msblk->uid, sblk->uid_start,
++					((sblk->no_uids + sblk->no_guids) *
++					 sizeof(unsigned int)) |
++					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
++			ERROR("unable to read uid/gid table\n");
++			goto failed_mount;
++		}
++
++
++	if (sblk->s_major == 1 && squashfs_1_0_supported(msblk))
++		goto allocate_root;
++
++	if (!(msblk->fragment = kmalloc(sizeof(struct squashfs_fragment_cache) *
++				SQUASHFS_CACHED_FRAGMENTS, GFP_KERNEL))) {
++		ERROR("Failed to allocate fragment block cache\n");
++		goto failed_mount;
++	}
++
++	for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) {
++		msblk->fragment[i].locked = 0;
++		msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
++		msblk->fragment[i].data = NULL;
++	}
++
++	msblk->next_fragment = 0;
++
++	/* Allocate fragment index table */
++	if (msblk->read_fragment_index_table(s) == 0)
++		goto failed_mount;
++
++allocate_root:
++	if ((root = (msblk->iget)(s, sblk->root_inode)) == NULL)
++		goto failed_mount;
++
++	if ((s->s_root = d_alloc_root(root)) == NULL) {
++		ERROR("Root inode create failed\n");
++		iput(root);
++		goto failed_mount;
++	}
++
++	TRACE("Leaving squashfs_read_super\n");
++	return 0;
++
++failed_mount:
++	kfree(msblk->fragment_index);
++	kfree(msblk->fragment);
++	kfree(msblk->uid);
++	kfree(msblk->read_page);
++	kfree(msblk->read_data);
++	kfree(msblk->block_cache);
++	kfree(msblk->fragment_index_2);
++	kfree(s->s_fs_info);
++	s->s_fs_info = NULL;
++	return -EINVAL;
++
++failure:
++	return -ENOMEM;
++}
++
++
++static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++	struct squashfs_sb_info *msblk = dentry->d_inode->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++
++	TRACE("Entered squashfs_statfs\n");
++
++	buf->f_type = SQUASHFS_MAGIC;
++	buf->f_bsize = sblk->block_size;
++	buf->f_blocks = ((sblk->bytes_used - 1) >> sblk->block_log) + 1;
++	buf->f_bfree = buf->f_bavail = 0;
++	buf->f_files = sblk->inodes;
++	buf->f_ffree = 0;
++	buf->f_namelen = SQUASHFS_NAME_LEN;
++
++	return 0;
++}
++
++
++static int squashfs_symlink_readpage(struct file *file, struct page *page)
++{
++	struct inode *inode = page->mapping->host;
++	int index = page->index << PAGE_CACHE_SHIFT, length, bytes;
++	long long block = SQUASHFS_I(inode)->start_block;
++	int offset = SQUASHFS_I(inode)->offset;
++	void *pageaddr = kmap(page);
++
++	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
++				"%llx, offset %x\n", page->index,
++				SQUASHFS_I(inode)->start_block,
++				SQUASHFS_I(inode)->offset);
++
++	for (length = 0; length < index; length += bytes) {
++		if (!(bytes = squashfs_get_cached_block(inode->i_sb, NULL,
++				block, offset, PAGE_CACHE_SIZE, &block,
++				&offset))) {
++			ERROR("Unable to read symbolic link [%llx:%x]\n", block,
++					offset);
++			goto skip_read;
++		}
++	}
++
++	if (length != index) {
++		ERROR("(squashfs_symlink_readpage) length != index\n");
++		bytes = 0;
++		goto skip_read;
++	}
++
++	bytes = (i_size_read(inode) - length) > PAGE_CACHE_SIZE ? PAGE_CACHE_SIZE :
++					i_size_read(inode) - length;
++
++	if (!(bytes = squashfs_get_cached_block(inode->i_sb, pageaddr, block,
++					offset, bytes, &block, &offset)))
++		ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset);
++
++skip_read:
++	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
++	kunmap(page);
++	SetPageUptodate(page);
++	unlock_page(page);
++
++	return 0;
++}
++
++
++struct meta_index *locate_meta_index(struct inode *inode, int index, int offset)
++{
++	struct meta_index *meta = NULL;
++	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
++	int i;
++
++	down(&msblk->meta_index_mutex);
++
++	TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
++
++	if(msblk->meta_index == NULL)
++		goto not_allocated;
++
++	for (i = 0; i < SQUASHFS_META_NUMBER; i ++)
++		if (msblk->meta_index[i].inode_number == inode->i_ino &&
++				msblk->meta_index[i].offset >= offset &&
++				msblk->meta_index[i].offset <= index &&
++				msblk->meta_index[i].locked == 0) {
++			TRACE("locate_meta_index: entry %d, offset %d\n", i,
++					msblk->meta_index[i].offset);
++			meta = &msblk->meta_index[i];
++			offset = meta->offset;
++		}
++
++	if (meta)
++		meta->locked = 1;
++
++not_allocated:
++	up(&msblk->meta_index_mutex);
++
++	return meta;
++}
++
++
++struct meta_index *empty_meta_index(struct inode *inode, int offset, int skip)
++{
++	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
++	struct meta_index *meta = NULL;
++	int i;
++
++	down(&msblk->meta_index_mutex);
++
++	TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
++
++	if(msblk->meta_index == NULL) {
++		if (!(msblk->meta_index = kmalloc(sizeof(struct meta_index) *
++					SQUASHFS_META_NUMBER, GFP_KERNEL))) {
++			ERROR("Failed to allocate meta_index\n");
++			goto failed;
++		}
++		for(i = 0; i < SQUASHFS_META_NUMBER; i++) {
++			msblk->meta_index[i].inode_number = 0;
++			msblk->meta_index[i].locked = 0;
++		}
++		msblk->next_meta_index = 0;
++	}
++
++	for(i = SQUASHFS_META_NUMBER; i &&
++			msblk->meta_index[msblk->next_meta_index].locked; i --)
++		msblk->next_meta_index = (msblk->next_meta_index + 1) %
++			SQUASHFS_META_NUMBER;
++
++	if(i == 0) {
++		TRACE("empty_meta_index: failed!\n");
++		goto failed;
++	}
++
++	TRACE("empty_meta_index: returned meta entry %d, %p\n",
++			msblk->next_meta_index,
++			&msblk->meta_index[msblk->next_meta_index]);
++
++	meta = &msblk->meta_index[msblk->next_meta_index];
++	msblk->next_meta_index = (msblk->next_meta_index + 1) %
++			SQUASHFS_META_NUMBER;
++
++	meta->inode_number = inode->i_ino;
++	meta->offset = offset;
++	meta->skip = skip;
++	meta->entries = 0;
++	meta->locked = 1;
++
++failed:
++	up(&msblk->meta_index_mutex);
++	return meta;
++}
++
++
++void release_meta_index(struct inode *inode, struct meta_index *meta)
++{
++	meta->locked = 0;
++}
++
++
++static int read_block_index(struct super_block *s, int blocks, char *block_list,
++		long long *start_block, int *offset)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	unsigned int *block_listp;
++	int block = 0;
++
++	if (msblk->swap) {
++		char sblock_list[blocks << 2];
++
++		if (!squashfs_get_cached_block(s, sblock_list, *start_block,
++				*offset, blocks << 2, start_block, offset)) {
++			ERROR("Unable to read block list [%llx:%x]\n",
++				*start_block, *offset);
++			goto failure;
++		}
++		SQUASHFS_SWAP_INTS(((unsigned int *)block_list),
++				((unsigned int *)sblock_list), blocks);
++	} else
++		if (!squashfs_get_cached_block(s, block_list, *start_block,
++				*offset, blocks << 2, start_block, offset)) {
++			ERROR("Unable to read block list [%llx:%x]\n",
++				*start_block, *offset);
++			goto failure;
++		}
++
++	for (block_listp = (unsigned int *) block_list; blocks;
++				block_listp++, blocks --)
++		block += SQUASHFS_COMPRESSED_SIZE_BLOCK(*block_listp);
++
++	return block;
++
++failure:
++	return -1;
++}
++
++
++#define SIZE 256
++
++static inline int calculate_skip(int blocks) {
++	int skip = (blocks - 1) / ((SQUASHFS_SLOTS * SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES);
++	return skip >= 7 ? 7 : skip + 1;
++}
++
++
++static int get_meta_index(struct inode *inode, int index,
++		long long *index_block, int *index_offset,
++		long long *data_block, char *block_list)
++{
++	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	int skip = calculate_skip(i_size_read(inode) >> sblk->block_log);
++	int offset = 0;
++	struct meta_index *meta;
++	struct meta_entry *meta_entry;
++	long long cur_index_block = SQUASHFS_I(inode)->u.s1.block_list_start;
++	int cur_offset = SQUASHFS_I(inode)->offset;
++	long long cur_data_block = SQUASHFS_I(inode)->start_block;
++	int i;
++
++	index /= SQUASHFS_META_INDEXES * skip;
++
++	while ( offset < index ) {
++		meta = locate_meta_index(inode, index, offset + 1);
++
++		if (meta == NULL) {
++			if ((meta = empty_meta_index(inode, offset + 1,
++							skip)) == NULL)
++				goto all_done;
++		} else {
++			offset = index < meta->offset + meta->entries ? index :
++				meta->offset + meta->entries - 1;
++			meta_entry = &meta->meta_entry[offset - meta->offset];
++			cur_index_block = meta_entry->index_block + sblk->inode_table_start;
++			cur_offset = meta_entry->offset;
++			cur_data_block = meta_entry->data_block;
++			TRACE("get_meta_index: offset %d, meta->offset %d, "
++				"meta->entries %d\n", offset, meta->offset,
++				meta->entries);
++			TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
++				" data_block 0x%llx\n", cur_index_block,
++				cur_offset, cur_data_block);
++		}
++
++		for (i = meta->offset + meta->entries; i <= index &&
++				i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
++			int blocks = skip * SQUASHFS_META_INDEXES;
++
++			while (blocks) {
++				int block = blocks > (SIZE >> 2) ? (SIZE >> 2) :
++					blocks;
++				int res = read_block_index(inode->i_sb, block,
++					block_list, &cur_index_block,
++					&cur_offset);
++
++				if (res == -1)
++					goto failed;
++
++				cur_data_block += res;
++				blocks -= block;
++			}
++
++			meta_entry = &meta->meta_entry[i - meta->offset];
++			meta_entry->index_block = cur_index_block - sblk->inode_table_start;
++			meta_entry->offset = cur_offset;
++			meta_entry->data_block = cur_data_block;
++			meta->entries ++;
++			offset ++;
++		}
++
++		TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
++				meta->offset, meta->entries);
++
++		release_meta_index(inode, meta);
++	}
++
++all_done:
++	*index_block = cur_index_block;
++	*index_offset = cur_offset;
++	*data_block = cur_data_block;
++
++	return offset * SQUASHFS_META_INDEXES * skip;
++
++failed:
++	release_meta_index(inode, meta);
++	return -1;
++}
++
++
++static long long read_blocklist(struct inode *inode, int index,
++				int readahead_blks, char *block_list,
++				unsigned short **block_p, unsigned int *bsize)
++{
++	long long block_ptr;
++	int offset;
++	long long block;
++	int res = get_meta_index(inode, index, &block_ptr, &offset, &block,
++		block_list);
++
++	TRACE("read_blocklist: res %d, index %d, block_ptr 0x%llx, offset"
++		       " 0x%x, block 0x%llx\n", res, index, block_ptr, offset,
++		       block);
++
++	if(res == -1)
++		goto failure;
++
++	index -= res;
++
++	while ( index ) {
++		int blocks = index > (SIZE >> 2) ? (SIZE >> 2) : index;
++		int res = read_block_index(inode->i_sb, blocks, block_list,
++			&block_ptr, &offset);
++		if (res == -1)
++			goto failure;
++		block += res;
++		index -= blocks;
++	}
++
++	if (read_block_index(inode->i_sb, 1, block_list,
++			&block_ptr, &offset) == -1)
++		goto failure;
++	*bsize = *((unsigned int *) block_list);
++
++	return block;
++
++failure:
++	return 0;
++}
++
++
++static int squashfs_readpage(struct file *file, struct page *page)
++{
++	struct inode *inode = page->mapping->host;
++	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	unsigned char block_list[SIZE];
++	long long block;
++	unsigned int bsize, i = 0, bytes = 0, byte_offset = 0;
++	int index = page->index >> (sblk->block_log - PAGE_CACHE_SHIFT);
++ 	void *pageaddr;
++	struct squashfs_fragment_cache *fragment = NULL;
++	char *data_ptr = msblk->read_page;
++
++	int mask = (1 << (sblk->block_log - PAGE_CACHE_SHIFT)) - 1;
++	int start_index = page->index & ~mask;
++	int end_index = start_index | mask;
++
++	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
++					page->index,
++					SQUASHFS_I(inode)->start_block);
++
++	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
++					PAGE_CACHE_SHIFT))
++		goto skip_read;
++
++	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
++					|| index < (i_size_read(inode) >>
++					sblk->block_log)) {
++		if ((block = (msblk->read_blocklist)(inode, index, 1,
++					block_list, NULL, &bsize)) == 0)
++			goto skip_read;
++
++		down(&msblk->read_page_mutex);
++
++		if (!(bytes = squashfs_read_data(inode->i_sb, msblk->read_page,
++					block, bsize, NULL))) {
++			ERROR("Unable to read page, block %llx, size %x\n", block,
++					bsize);
++			up(&msblk->read_page_mutex);
++			goto skip_read;
++		}
++	} else {
++		if ((fragment = get_cached_fragment(inode->i_sb,
++					SQUASHFS_I(inode)->
++					u.s1.fragment_start_block,
++					SQUASHFS_I(inode)->u.s1.fragment_size))
++					== NULL) {
++			ERROR("Unable to read page, block %llx, size %x\n",
++					SQUASHFS_I(inode)->
++					u.s1.fragment_start_block,
++					(int) SQUASHFS_I(inode)->
++					u.s1.fragment_size);
++			goto skip_read;
++		}
++		bytes = SQUASHFS_I(inode)->u.s1.fragment_offset +
++					(i_size_read(inode) & (sblk->block_size
++					- 1));
++		byte_offset = SQUASHFS_I(inode)->u.s1.fragment_offset;
++		data_ptr = fragment->data;
++	}
++
++	for (i = start_index; i <= end_index && byte_offset < bytes;
++					i++, byte_offset += PAGE_CACHE_SIZE) {
++		struct page *push_page;
++		int available_bytes = (bytes - byte_offset) > PAGE_CACHE_SIZE ?
++					PAGE_CACHE_SIZE : bytes - byte_offset;
++
++		TRACE("bytes %d, i %d, byte_offset %d, available_bytes %d\n",
++					bytes, i, byte_offset, available_bytes);
++
++		if (i == page->index)  {
++			pageaddr = kmap_atomic(page, KM_USER0);
++			memcpy(pageaddr, data_ptr + byte_offset,
++					available_bytes);
++			memset(pageaddr + available_bytes, 0,
++					PAGE_CACHE_SIZE - available_bytes);
++			kunmap_atomic(pageaddr, KM_USER0);
++			flush_dcache_page(page);
++			SetPageUptodate(page);
++			unlock_page(page);
++		} else if ((push_page =
++				grab_cache_page_nowait(page->mapping, i))) {
++ 			pageaddr = kmap_atomic(push_page, KM_USER0);
++
++			memcpy(pageaddr, data_ptr + byte_offset,
++					available_bytes);
++			memset(pageaddr + available_bytes, 0,
++					PAGE_CACHE_SIZE - available_bytes);
++			kunmap_atomic(pageaddr, KM_USER0);
++			flush_dcache_page(push_page);
++			SetPageUptodate(push_page);
++			unlock_page(push_page);
++			page_cache_release(push_page);
++		}
++	}
++
++	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
++					|| index < (i_size_read(inode) >>
++					sblk->block_log))
++		up(&msblk->read_page_mutex);
++	else
++		release_cached_fragment(msblk, fragment);
++
++	return 0;
++
++skip_read:
++	pageaddr = kmap_atomic(page, KM_USER0);
++	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
++	kunmap_atomic(pageaddr, KM_USER0);
++	flush_dcache_page(page);
++	SetPageUptodate(page);
++	unlock_page(page);
++
++	return 0;
++}
++
++
++static int squashfs_readpage4K(struct file *file, struct page *page)
++{
++	struct inode *inode = page->mapping->host;
++	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	unsigned char block_list[SIZE];
++	long long block;
++	unsigned int bsize, bytes = 0;
++ 	void *pageaddr;
++
++	TRACE("Entered squashfs_readpage4K, page index %lx, start block %llx\n",
++					page->index,
++					SQUASHFS_I(inode)->start_block);
++
++	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
++					PAGE_CACHE_SHIFT)) {
++		pageaddr = kmap_atomic(page, KM_USER0);
++		goto skip_read;
++	}
++
++	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
++					|| page->index < (i_size_read(inode) >>
++					sblk->block_log)) {
++		block = (msblk->read_blocklist)(inode, page->index, 1,
++					block_list, NULL, &bsize);
++
++		down(&msblk->read_page_mutex);
++		bytes = squashfs_read_data(inode->i_sb, msblk->read_page, block,
++					bsize, NULL);
++		pageaddr = kmap_atomic(page, KM_USER0);
++		if (bytes)
++			memcpy(pageaddr, msblk->read_page, bytes);
++		else
++			ERROR("Unable to read page, block %llx, size %x\n",
++					block, bsize);
++		up(&msblk->read_page_mutex);
++	} else {
++		struct squashfs_fragment_cache *fragment =
++			get_cached_fragment(inode->i_sb,
++					SQUASHFS_I(inode)->
++					u.s1.fragment_start_block,
++					SQUASHFS_I(inode)-> u.s1.fragment_size);
++		pageaddr = kmap_atomic(page, KM_USER0);
++		if (fragment) {
++			bytes = i_size_read(inode) & (sblk->block_size - 1);
++			memcpy(pageaddr, fragment->data + SQUASHFS_I(inode)->
++					u.s1.fragment_offset, bytes);
++			release_cached_fragment(msblk, fragment);
++		} else
++			ERROR("Unable to read page, block %llx, size %x\n",
++					SQUASHFS_I(inode)->
++					u.s1.fragment_start_block, (int)
++					SQUASHFS_I(inode)-> u.s1.fragment_size);
++	}
++
++skip_read:
++	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
++	kunmap_atomic(pageaddr, KM_USER0);
++	flush_dcache_page(page);
++	SetPageUptodate(page);
++	unlock_page(page);
++
++	return 0;
++}
++
++
++static int get_dir_index_using_offset(struct super_block *s, long long
++				*next_block, unsigned int *next_offset,
++				long long index_start,
++				unsigned int index_offset, int i_count,
++				long long f_pos)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	int i, length = 0;
++	struct squashfs_dir_index index;
++
++	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
++					i_count, (unsigned int) f_pos);
++
++	f_pos -= 3;
++	if (f_pos == 0)
++		goto finish;
++
++	for (i = 0; i < i_count; i++) {
++		if (msblk->swap) {
++			struct squashfs_dir_index sindex;
++			squashfs_get_cached_block(s, (char *) &sindex,
++					index_start, index_offset,
++					sizeof(sindex), &index_start,
++					&index_offset);
++			SQUASHFS_SWAP_DIR_INDEX(&index, &sindex);
++		} else
++			squashfs_get_cached_block(s, (char *) &index,
++					index_start, index_offset,
++					sizeof(index), &index_start,
++					&index_offset);
++
++		if (index.index > f_pos)
++			break;
++
++		squashfs_get_cached_block(s, NULL, index_start, index_offset,
++					index.size + 1, &index_start,
++					&index_offset);
++
++		length = index.index;
++		*next_block = index.start_block + sblk->directory_table_start;
++	}
++
++	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
++
++finish:
++	return length + 3;
++}
++
++
++static int get_dir_index_using_name(struct super_block *s, long long
++				*next_block, unsigned int *next_offset,
++				long long index_start,
++				unsigned int index_offset, int i_count,
++				const char *name, int size)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	int i, length = 0;
++	char buffer[sizeof(struct squashfs_dir_index) + SQUASHFS_NAME_LEN + 1];
++	struct squashfs_dir_index *index = (struct squashfs_dir_index *) buffer;
++	char str[SQUASHFS_NAME_LEN + 1];
++
++	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
++
++	strncpy(str, name, size);
++	str[size] = '\0';
++
++	for (i = 0; i < i_count; i++) {
++		if (msblk->swap) {
++			struct squashfs_dir_index sindex;
++			squashfs_get_cached_block(s, (char *) &sindex,
++					index_start, index_offset,
++					sizeof(sindex), &index_start,
++					&index_offset);
++			SQUASHFS_SWAP_DIR_INDEX(index, &sindex);
++		} else
++			squashfs_get_cached_block(s, (char *) index,
++					index_start, index_offset,
++					sizeof(struct squashfs_dir_index),
++					&index_start, &index_offset);
++
++		squashfs_get_cached_block(s, index->name, index_start,
++					index_offset, index->size + 1,
++					&index_start, &index_offset);
++
++		index->name[index->size + 1] = '\0';
++
++		if (strcmp(index->name, str) > 0)
++			break;
++
++		length = index->index;
++		*next_block = index->start_block + sblk->directory_table_start;
++	}
++
++	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
++	return length + 3;
++}
++
++
++static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
++{
++	struct inode *i = file->f_dentry->d_inode;
++	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	long long next_block = SQUASHFS_I(i)->start_block +
++		sblk->directory_table_start;
++	int next_offset = SQUASHFS_I(i)->offset, length = 0, dirs_read = 0,
++		dir_count;
++	struct squashfs_dir_header dirh;
++	char buffer[sizeof(struct squashfs_dir_entry) + SQUASHFS_NAME_LEN + 1];
++	struct squashfs_dir_entry *dire = (struct squashfs_dir_entry *) buffer;
++
++	TRACE("Entered squashfs_readdir [%llx:%x]\n", next_block, next_offset);
++
++	while(file->f_pos < 3) {
++		char *name;
++		int size, i_ino;
++
++		if(file->f_pos == 0) {
++			name = ".";
++			size = 1;
++			i_ino = i->i_ino;
++		} else {
++			name = "..";
++			size = 2;
++			i_ino = SQUASHFS_I(i)->u.s2.parent_inode;
++		}
++		TRACE("Calling filldir(%x, %s, %d, %d, %d, %d)\n",
++				(unsigned int) dirent, name, size, (int)
++				file->f_pos, i_ino,
++				squashfs_filetype_table[1]);
++
++		if (filldir(dirent, name, size,
++				file->f_pos, i_ino,
++				squashfs_filetype_table[1]) < 0) {
++				TRACE("Filldir returned less than 0\n");
++				goto finish;
++		}
++		file->f_pos += size;
++		dirs_read++;
++	}
++
++	length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_start,
++				SQUASHFS_I(i)->u.s2.directory_index_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_count,
++				file->f_pos);
++
++	while (length < i_size_read(i)) {
++		/* read directory header */
++		if (msblk->swap) {
++			struct squashfs_dir_header sdirh;
++
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
++					next_block, next_offset, sizeof(sdirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(sdirh);
++			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
++		} else {
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
++					next_block, next_offset, sizeof(dirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(dirh);
++		}
++
++		dir_count = dirh.count + 1;
++		while (dir_count--) {
++			if (msblk->swap) {
++				struct squashfs_dir_entry sdire;
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						&sdire, next_block, next_offset,
++						sizeof(sdire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(sdire);
++				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
++			} else {
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						dire, next_block, next_offset,
++						sizeof(*dire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(*dire);
++			}
++
++			if (!squashfs_get_cached_block(i->i_sb, dire->name,
++						next_block, next_offset,
++						dire->size + 1, &next_block,
++						&next_offset))
++				goto failed_read;
++
++			length += dire->size + 1;
++
++			if (file->f_pos >= length)
++				continue;
++
++			dire->name[dire->size + 1] = '\0';
++
++			TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d, %d)\n",
++					(unsigned int) dirent, dire->name,
++					dire->size + 1, (int) file->f_pos,
++					dirh.start_block, dire->offset,
++					dirh.inode_number + dire->inode_number,
++					squashfs_filetype_table[dire->type]);
++
++			if (filldir(dirent, dire->name, dire->size + 1,
++					file->f_pos,
++					dirh.inode_number + dire->inode_number,
++					squashfs_filetype_table[dire->type])
++					< 0) {
++				TRACE("Filldir returned less than 0\n");
++				goto finish;
++			}
++			file->f_pos = length;
++			dirs_read++;
++		}
++	}
++
++finish:
++	return dirs_read;
++
++failed_read:
++	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
++		next_offset);
++	return 0;
++}
++
++
++static struct dentry *squashfs_lookup(struct inode *i, struct dentry *dentry,
++				struct nameidata *nd)
++{
++	const unsigned char *name = dentry->d_name.name;
++	int len = dentry->d_name.len;
++	struct inode *inode = NULL;
++	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	long long next_block = SQUASHFS_I(i)->start_block +
++				sblk->directory_table_start;
++	int next_offset = SQUASHFS_I(i)->offset, length = 0,
++				dir_count;
++	struct squashfs_dir_header dirh;
++	char buffer[sizeof(struct squashfs_dir_entry) + SQUASHFS_NAME_LEN];
++	struct squashfs_dir_entry *dire = (struct squashfs_dir_entry *) buffer;
++
++	TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset);
++
++	if (len > SQUASHFS_NAME_LEN)
++		goto exit_loop;
++
++	length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_start,
++				SQUASHFS_I(i)->u.s2.directory_index_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_count, name,
++				len);
++
++	while (length < i_size_read(i)) {
++		/* read directory header */
++		if (msblk->swap) {
++			struct squashfs_dir_header sdirh;
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
++					next_block, next_offset, sizeof(sdirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(sdirh);
++			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
++		} else {
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
++					next_block, next_offset, sizeof(dirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(dirh);
++		}
++
++		dir_count = dirh.count + 1;
++		while (dir_count--) {
++			if (msblk->swap) {
++				struct squashfs_dir_entry sdire;
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						&sdire, next_block,next_offset,
++						sizeof(sdire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(sdire);
++				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
++			} else {
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						dire, next_block,next_offset,
++						sizeof(*dire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(*dire);
++			}
++
++			if (!squashfs_get_cached_block(i->i_sb, dire->name,
++					next_block, next_offset, dire->size + 1,
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += dire->size + 1;
++
++			if (name[0] < dire->name[0])
++				goto exit_loop;
++
++			if ((len == dire->size + 1) && !strncmp(name,
++						dire->name, len)) {
++				squashfs_inode_t ino =
++					SQUASHFS_MKINODE(dirh.start_block,
++					dire->offset);
++
++				TRACE("calling squashfs_iget for directory "
++					"entry %s, inode %x:%x, %d\n", name,
++					dirh.start_block, dire->offset,
++					dirh.inode_number + dire->inode_number);
++
++				inode = (msblk->iget)(i->i_sb, ino);
++
++				goto exit_loop;
++			}
++		}
++	}
++
++exit_loop:
++	d_add(dentry, inode);
++	return ERR_PTR(0);
++
++failed_read:
++	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
++		next_offset);
++	goto exit_loop;
++}
++
++
++static void squashfs_put_super(struct super_block *s)
++{
++	int i;
++
++	if (s->s_fs_info) {
++		struct squashfs_sb_info *sbi = s->s_fs_info;
++		if (sbi->block_cache)
++			for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
++				if (sbi->block_cache[i].block !=
++							SQUASHFS_INVALID_BLK)
++					kfree(sbi->block_cache[i].data);
++		if (sbi->fragment)
++			for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++)
++				SQUASHFS_FREE(sbi->fragment[i].data);
++		kfree(sbi->fragment);
++		kfree(sbi->block_cache);
++		kfree(sbi->read_data);
++		kfree(sbi->read_page);
++		kfree(sbi->uid);
++		kfree(sbi->fragment_index);
++		kfree(sbi->fragment_index_2);
++		kfree(sbi->meta_index);
++		kfree(s->s_fs_info);
++		s->s_fs_info = NULL;
++	}
++}
++
++
++static int squashfs_get_sb(struct file_system_type *fs_type,
++			int flags, const char *dev_name, void *data,
++			struct vfsmount *mnt)
++{
++	return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super, mnt);
++}
++
++
++static int __init init_squashfs_fs(void)
++{
++	int err = init_inodecache();
++	if (err)
++		goto out;
++
++	printk(KERN_INFO "squashfs: version 3.0 (2006/03/15) "
++		"Phillip Lougher\n");
++
++	if (!(stream.workspace = vmalloc(zlib_inflate_workspacesize()))) {
++		ERROR("Failed to allocate zlib workspace\n");
++		destroy_inodecache();
++		err = -ENOMEM;
++		goto out;
++	}
++
++	if ((err = register_filesystem(&squashfs_fs_type))) {
++		vfree(stream.workspace);
++		destroy_inodecache();
++	}
++
++out:
++	return err;
++}
++
++
++static void __exit exit_squashfs_fs(void)
++{
++	vfree(stream.workspace);
++	unregister_filesystem(&squashfs_fs_type);
++	destroy_inodecache();
++}
++
++
++static struct kmem_cache * squashfs_inode_cachep;
++
++
++static struct inode *squashfs_alloc_inode(struct super_block *sb)
++{
++	struct squashfs_inode_info *ei;
++	ei = kmem_cache_alloc(squashfs_inode_cachep, GFP_KERNEL);
++	if (!ei)
++		return NULL;
++	return &ei->vfs_inode;
++}
++
++
++static void squashfs_destroy_inode(struct inode *inode)
++{
++	kmem_cache_free(squashfs_inode_cachep, SQUASHFS_I(inode));
++}
++
++
++static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
++{
++	struct squashfs_inode_info *ei = foo;
++
++	inode_init_once(&ei->vfs_inode);
++}
++
++
++static int __init init_inodecache(void)
++{
++	squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
++	     sizeof(struct squashfs_inode_info),
++	     0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
++	     init_once, NULL);
++	if (squashfs_inode_cachep == NULL)
++		return -ENOMEM;
++	return 0;
++}
++
++
++static void destroy_inodecache(void)
++{
++	kmem_cache_destroy(squashfs_inode_cachep);
++}
++
++
++module_init(init_squashfs_fs);
++module_exit(exit_squashfs_fs);
++MODULE_DESCRIPTION("squashfs, a compressed read-only filesystem");
++MODULE_AUTHOR("Phillip Lougher <phillip@lougher.org.uk>");
++MODULE_LICENSE("GPL");
+diff -urN linux-2.6.21.1.old/fs/squashfs/Makefile linux-2.6.21.1.dev/fs/squashfs/Makefile
+--- linux-2.6.21.1.old/fs/squashfs/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/squashfs/Makefile	2007-05-26 19:00:37.123351456 +0200
+@@ -0,0 +1,7 @@
++#
++# Makefile for the linux squashfs routines.
++#
++
++obj-$(CONFIG_SQUASHFS) += squashfs.o
++squashfs-y += inode.o
++squashfs-y += squashfs2_0.o
+diff -urN linux-2.6.21.1.old/fs/squashfs/squashfs2_0.c linux-2.6.21.1.dev/fs/squashfs/squashfs2_0.c
+--- linux-2.6.21.1.old/fs/squashfs/squashfs2_0.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/squashfs/squashfs2_0.c	2007-05-26 19:00:37.125351152 +0200
+@@ -0,0 +1,758 @@
++/*
++ * Squashfs - a compressed read only filesystem for Linux
++ *
++ * Copyright (c) 2002, 2003, 2004, 2005, 2006
++ * Phillip Lougher <phillip@lougher.org.uk>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2,
++ * or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * squashfs2_0.c
++ */
++
++#include <linux/types.h>
++#include <linux/squashfs_fs.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/slab.h>
++#include <linux/fs.h>
++#include <linux/smp_lock.h>
++#include <linux/slab.h>
++#include <linux/squashfs_fs_sb.h>
++#include <linux/squashfs_fs_i.h>
++#include <linux/buffer_head.h>
++#include <linux/vfs.h>
++#include <linux/init.h>
++#include <linux/dcache.h>
++#include <linux/wait.h>
++#include <linux/zlib.h>
++#include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <asm/uaccess.h>
++#include <asm/semaphore.h>
++
++#include "squashfs.h"
++static int squashfs_readdir_2(struct file *file, void *dirent, filldir_t filldir);
++static struct dentry *squashfs_lookup_2(struct inode *, struct dentry *,
++				struct nameidata *);
++
++static struct file_operations squashfs_dir_ops_2 = {
++	.read = generic_read_dir,
++	.readdir = squashfs_readdir_2
++};
++
++static struct inode_operations squashfs_dir_inode_ops_2 = {
++	.lookup = squashfs_lookup_2
++};
++
++static unsigned char squashfs_filetype_table[] = {
++	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
++};
++
++static int read_fragment_index_table_2(struct super_block *s)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++
++	if (!(msblk->fragment_index_2 = kmalloc(SQUASHFS_FRAGMENT_INDEX_BYTES_2
++					(sblk->fragments), GFP_KERNEL))) {
++		ERROR("Failed to allocate uid/gid table\n");
++		return 0;
++	}
++
++	if (SQUASHFS_FRAGMENT_INDEX_BYTES_2(sblk->fragments) &&
++					!squashfs_read_data(s, (char *)
++					msblk->fragment_index_2,
++					sblk->fragment_table_start,
++					SQUASHFS_FRAGMENT_INDEX_BYTES_2
++					(sblk->fragments) |
++					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL)) {
++		ERROR("unable to read fragment index table\n");
++		return 0;
++	}
++
++	if (msblk->swap) {
++		int i;
++		unsigned int fragment;
++
++		for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES_2(sblk->fragments);
++									i++) {
++			SQUASHFS_SWAP_FRAGMENT_INDEXES_2((&fragment),
++						&msblk->fragment_index_2[i], 1);
++			msblk->fragment_index_2[i] = fragment;
++		}
++	}
++
++	return 1;
++}
++
++
++static int get_fragment_location_2(struct super_block *s, unsigned int fragment,
++				long long *fragment_start_block,
++				unsigned int *fragment_size)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	long long start_block =
++		msblk->fragment_index_2[SQUASHFS_FRAGMENT_INDEX_2(fragment)];
++	int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET_2(fragment);
++	struct squashfs_fragment_entry_2 fragment_entry;
++
++	if (msblk->swap) {
++		struct squashfs_fragment_entry_2 sfragment_entry;
++
++		if (!squashfs_get_cached_block(s, (char *) &sfragment_entry,
++					start_block, offset,
++					sizeof(sfragment_entry), &start_block,
++					&offset))
++			goto out;
++		SQUASHFS_SWAP_FRAGMENT_ENTRY_2(&fragment_entry, &sfragment_entry);
++	} else
++		if (!squashfs_get_cached_block(s, (char *) &fragment_entry,
++					start_block, offset,
++					sizeof(fragment_entry), &start_block,
++					&offset))
++			goto out;
++
++	*fragment_start_block = fragment_entry.start_block;
++	*fragment_size = fragment_entry.size;
++
++	return 1;
++
++out:
++	return 0;
++}
++
++
++static struct inode *squashfs_new_inode(struct super_block *s,
++		struct squashfs_base_inode_header_2 *inodeb, unsigned int ino)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	struct inode *i = new_inode(s);
++
++	if (i) {
++		i->i_ino = ino;
++		i->i_mtime.tv_sec = sblk->mkfs_time;
++		i->i_atime.tv_sec = sblk->mkfs_time;
++		i->i_ctime.tv_sec = sblk->mkfs_time;
++		i->i_uid = msblk->uid[inodeb->uid];
++		i->i_mode = inodeb->mode;
++		i->i_nlink = 1;
++		i->i_size = 0;
++		if (inodeb->guid == SQUASHFS_GUIDS)
++			i->i_gid = i->i_uid;
++		else
++			i->i_gid = msblk->guid[inodeb->guid];
++	}
++
++	return i;
++}
++
++
++static struct inode *squashfs_iget_2(struct super_block *s, squashfs_inode_t inode)
++{
++	struct inode *i;
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	unsigned int block = SQUASHFS_INODE_BLK(inode) +
++		sblk->inode_table_start;
++	unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
++	unsigned int ino = SQUASHFS_MK_VFS_INODE(block
++		- sblk->inode_table_start, offset);
++	long long next_block;
++	unsigned int next_offset;
++	union squashfs_inode_header_2 id, sid;
++	struct squashfs_base_inode_header_2 *inodeb = &id.base,
++					  *sinodeb = &sid.base;
++
++	TRACE("Entered squashfs_iget\n");
++
++	if (msblk->swap) {
++		if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
++					offset, sizeof(*sinodeb), &next_block,
++					&next_offset))
++			goto failed_read;
++		SQUASHFS_SWAP_BASE_INODE_HEADER_2(inodeb, sinodeb,
++					sizeof(*sinodeb));
++	} else
++		if (!squashfs_get_cached_block(s, (char *) inodeb, block,
++					offset, sizeof(*inodeb), &next_block,
++					&next_offset))
++			goto failed_read;
++
++	switch(inodeb->inode_type) {
++		case SQUASHFS_FILE_TYPE: {
++			struct squashfs_reg_inode_header_2 *inodep = &id.reg;
++			struct squashfs_reg_inode_header_2 *sinodep = &sid.reg;
++			long long frag_blk;
++			unsigned int frag_size;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_REG_INODE_HEADER_2(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			frag_blk = SQUASHFS_INVALID_BLK;
++			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
++					!get_fragment_location_2(s,
++					inodep->fragment, &frag_blk, &frag_size))
++				goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
++				goto failed_read1;
++
++			i->i_size = inodep->file_size;
++			i->i_fop = &generic_ro_fops;
++			i->i_mode |= S_IFREG;
++			i->i_mtime.tv_sec = inodep->mtime;
++			i->i_atime.tv_sec = inodep->mtime;
++			i->i_ctime.tv_sec = inodep->mtime;
++			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
++			i->i_blksize = PAGE_CACHE_SIZE;
++			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
++			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
++			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
++			SQUASHFS_I(i)->start_block = inodep->start_block;
++			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
++			SQUASHFS_I(i)->offset = next_offset;
++			if (sblk->block_size > 4096)
++				i->i_data.a_ops = &squashfs_aops;
++			else
++				i->i_data.a_ops = &squashfs_aops_4K;
++
++			TRACE("File inode %x:%x, start_block %x, "
++					"block_list_start %llx, offset %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					inodep->start_block, next_block,
++					next_offset);
++			break;
++		}
++		case SQUASHFS_DIR_TYPE: {
++			struct squashfs_dir_inode_header_2 *inodep = &id.dir;
++			struct squashfs_dir_inode_header_2 *sinodep = &sid.dir;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_DIR_INODE_HEADER_2(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
++				goto failed_read1;
++
++			i->i_size = inodep->file_size;
++			i->i_op = &squashfs_dir_inode_ops_2;
++			i->i_fop = &squashfs_dir_ops_2;
++			i->i_mode |= S_IFDIR;
++			i->i_mtime.tv_sec = inodep->mtime;
++			i->i_atime.tv_sec = inodep->mtime;
++			i->i_ctime.tv_sec = inodep->mtime;
++			SQUASHFS_I(i)->start_block = inodep->start_block;
++			SQUASHFS_I(i)->offset = inodep->offset;
++			SQUASHFS_I(i)->u.s2.directory_index_count = 0;
++			SQUASHFS_I(i)->u.s2.parent_inode = 0;
++
++			TRACE("Directory inode %x:%x, start_block %x, offset "
++					"%x\n", SQUASHFS_INODE_BLK(inode),
++					offset, inodep->start_block,
++					inodep->offset);
++			break;
++		}
++		case SQUASHFS_LDIR_TYPE: {
++			struct squashfs_ldir_inode_header_2 *inodep = &id.ldir;
++			struct squashfs_ldir_inode_header_2 *sinodep = &sid.ldir;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_LDIR_INODE_HEADER_2(inodep,
++						sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
++				goto failed_read1;
++
++			i->i_size = inodep->file_size;
++			i->i_op = &squashfs_dir_inode_ops_2;
++			i->i_fop = &squashfs_dir_ops_2;
++			i->i_mode |= S_IFDIR;
++			i->i_mtime.tv_sec = inodep->mtime;
++			i->i_atime.tv_sec = inodep->mtime;
++			i->i_ctime.tv_sec = inodep->mtime;
++			SQUASHFS_I(i)->start_block = inodep->start_block;
++			SQUASHFS_I(i)->offset = inodep->offset;
++			SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
++			SQUASHFS_I(i)->u.s2.directory_index_offset =
++								next_offset;
++			SQUASHFS_I(i)->u.s2.directory_index_count =
++								inodep->i_count;
++			SQUASHFS_I(i)->u.s2.parent_inode = 0;
++
++			TRACE("Long directory inode %x:%x, start_block %x, "
++					"offset %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					inodep->start_block, inodep->offset);
++			break;
++		}
++		case SQUASHFS_SYMLINK_TYPE: {
++			struct squashfs_symlink_inode_header_2 *inodep =
++								&id.symlink;
++			struct squashfs_symlink_inode_header_2 *sinodep =
++								&sid.symlink;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_SYMLINK_INODE_HEADER_2(inodep,
++								sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
++				goto failed_read1;
++
++			i->i_size = inodep->symlink_size;
++			i->i_op = &page_symlink_inode_operations;
++			i->i_data.a_ops = &squashfs_symlink_aops;
++			i->i_mode |= S_IFLNK;
++			SQUASHFS_I(i)->start_block = next_block;
++			SQUASHFS_I(i)->offset = next_offset;
++
++			TRACE("Symbolic link inode %x:%x, start_block %llx, "
++					"offset %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					next_block, next_offset);
++			break;
++		 }
++		 case SQUASHFS_BLKDEV_TYPE:
++		 case SQUASHFS_CHRDEV_TYPE: {
++			struct squashfs_dev_inode_header_2 *inodep = &id.dev;
++			struct squashfs_dev_inode_header_2 *sinodep = &sid.dev;
++
++			if (msblk->swap) {
++				if (!squashfs_get_cached_block(s, (char *)
++						sinodep, block, offset,
++						sizeof(*sinodep), &next_block,
++						&next_offset))
++					goto failed_read;
++				SQUASHFS_SWAP_DEV_INODE_HEADER_2(inodep, sinodep);
++			} else
++				if (!squashfs_get_cached_block(s, (char *)
++						inodep, block, offset,
++						sizeof(*inodep), &next_block,
++						&next_offset))
++					goto failed_read;
++
++			if ((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
++				goto failed_read1;
++
++			i->i_mode |= (inodeb->inode_type ==
++					SQUASHFS_CHRDEV_TYPE) ?  S_IFCHR :
++					S_IFBLK;
++			init_special_inode(i, i->i_mode,
++					old_decode_dev(inodep->rdev));
++
++			TRACE("Device inode %x:%x, rdev %x\n",
++					SQUASHFS_INODE_BLK(inode), offset,
++					inodep->rdev);
++			break;
++		 }
++		 case SQUASHFS_FIFO_TYPE:
++		 case SQUASHFS_SOCKET_TYPE: {
++			if ((i = squashfs_new_inode(s, inodeb, ino)) == NULL)
++				goto failed_read1;
++
++			i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
++							? S_IFIFO : S_IFSOCK;
++			init_special_inode(i, i->i_mode, 0);
++			break;
++		 }
++		 default:
++			ERROR("Unknown inode type %d in squashfs_iget!\n",
++					inodeb->inode_type);
++			goto failed_read1;
++	}
++
++	insert_inode_hash(i);
++	return i;
++
++failed_read:
++	ERROR("Unable to read inode [%x:%x]\n", block, offset);
++
++failed_read1:
++	return NULL;
++}
++
++
++static int get_dir_index_using_offset(struct super_block *s, long long
++				*next_block, unsigned int *next_offset,
++				long long index_start,
++				unsigned int index_offset, int i_count,
++				long long f_pos)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	int i, length = 0;
++	struct squashfs_dir_index_2 index;
++
++	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
++					i_count, (unsigned int) f_pos);
++
++	if (f_pos == 0)
++		goto finish;
++
++	for (i = 0; i < i_count; i++) {
++		if (msblk->swap) {
++			struct squashfs_dir_index_2 sindex;
++			squashfs_get_cached_block(s, (char *) &sindex,
++					index_start, index_offset,
++					sizeof(sindex), &index_start,
++					&index_offset);
++			SQUASHFS_SWAP_DIR_INDEX_2(&index, &sindex);
++		} else
++			squashfs_get_cached_block(s, (char *) &index,
++					index_start, index_offset,
++					sizeof(index), &index_start,
++					&index_offset);
++
++		if (index.index > f_pos)
++			break;
++
++		squashfs_get_cached_block(s, NULL, index_start, index_offset,
++					index.size + 1, &index_start,
++					&index_offset);
++
++		length = index.index;
++		*next_block = index.start_block + sblk->directory_table_start;
++	}
++
++	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
++
++finish:
++	return length;
++}
++
++
++static int get_dir_index_using_name(struct super_block *s, long long
++				*next_block, unsigned int *next_offset,
++				long long index_start,
++				unsigned int index_offset, int i_count,
++				const char *name, int size)
++{
++	struct squashfs_sb_info *msblk = s->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	int i, length = 0;
++	char buffer[sizeof(struct squashfs_dir_index_2) + SQUASHFS_NAME_LEN + 1];
++	struct squashfs_dir_index_2 *index = (struct squashfs_dir_index_2 *) buffer;
++	char str[SQUASHFS_NAME_LEN + 1];
++
++	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
++
++	strncpy(str, name, size);
++	str[size] = '\0';
++
++	for (i = 0; i < i_count; i++) {
++		if (msblk->swap) {
++			struct squashfs_dir_index_2 sindex;
++			squashfs_get_cached_block(s, (char *) &sindex,
++					index_start, index_offset,
++					sizeof(sindex), &index_start,
++					&index_offset);
++			SQUASHFS_SWAP_DIR_INDEX_2(index, &sindex);
++		} else
++			squashfs_get_cached_block(s, (char *) index,
++					index_start, index_offset,
++					sizeof(struct squashfs_dir_index_2),
++					&index_start, &index_offset);
++
++		squashfs_get_cached_block(s, index->name, index_start,
++					index_offset, index->size + 1,
++					&index_start, &index_offset);
++
++		index->name[index->size + 1] = '\0';
++
++		if (strcmp(index->name, str) > 0)
++			break;
++
++		length = index->index;
++		*next_block = index->start_block + sblk->directory_table_start;
++	}
++
++	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
++	return length;
++}
++
++
++static int squashfs_readdir_2(struct file *file, void *dirent, filldir_t filldir)
++{
++	struct inode *i = file->f_dentry->d_inode;
++	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	long long next_block = SQUASHFS_I(i)->start_block +
++		sblk->directory_table_start;
++	int next_offset = SQUASHFS_I(i)->offset, length = 0, dirs_read = 0,
++		dir_count;
++	struct squashfs_dir_header_2 dirh;
++	char buffer[sizeof(struct squashfs_dir_entry_2) + SQUASHFS_NAME_LEN + 1];
++	struct squashfs_dir_entry_2 *dire = (struct squashfs_dir_entry_2 *) buffer;
++
++	TRACE("Entered squashfs_readdir_2 [%llx:%x]\n", next_block, next_offset);
++
++	length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_start,
++				SQUASHFS_I(i)->u.s2.directory_index_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_count,
++				file->f_pos);
++
++	while (length < i_size_read(i)) {
++		/* read directory header */
++		if (msblk->swap) {
++			struct squashfs_dir_header_2 sdirh;
++
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
++					next_block, next_offset, sizeof(sdirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(sdirh);
++			SQUASHFS_SWAP_DIR_HEADER_2(&dirh, &sdirh);
++		} else {
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
++					next_block, next_offset, sizeof(dirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(dirh);
++		}
++
++		dir_count = dirh.count + 1;
++		while (dir_count--) {
++			if (msblk->swap) {
++				struct squashfs_dir_entry_2 sdire;
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						&sdire, next_block, next_offset,
++						sizeof(sdire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(sdire);
++				SQUASHFS_SWAP_DIR_ENTRY_2(dire, &sdire);
++			} else {
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						dire, next_block, next_offset,
++						sizeof(*dire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(*dire);
++			}
++
++			if (!squashfs_get_cached_block(i->i_sb, dire->name,
++						next_block, next_offset,
++						dire->size + 1, &next_block,
++						&next_offset))
++				goto failed_read;
++
++			length += dire->size + 1;
++
++			if (file->f_pos >= length)
++				continue;
++
++			dire->name[dire->size + 1] = '\0';
++
++			TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d)\n",
++					(unsigned int) dirent, dire->name,
++					dire->size + 1, (int) file->f_pos,
++					dirh.start_block, dire->offset,
++					squashfs_filetype_table[dire->type]);
++
++			if (filldir(dirent, dire->name, dire->size + 1,
++					file->f_pos, SQUASHFS_MK_VFS_INODE(
++					dirh.start_block, dire->offset),
++					squashfs_filetype_table[dire->type])
++					< 0) {
++				TRACE("Filldir returned less than 0\n");
++				goto finish;
++			}
++			file->f_pos = length;
++			dirs_read++;
++		}
++	}
++
++finish:
++	return dirs_read;
++
++failed_read:
++	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
++		next_offset);
++	return 0;
++}
++
++
++static struct dentry *squashfs_lookup_2(struct inode *i, struct dentry *dentry,
++				struct nameidata *nd)
++{
++	const unsigned char *name = dentry->d_name.name;
++	int len = dentry->d_name.len;
++	struct inode *inode = NULL;
++	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
++	struct squashfs_super_block *sblk = &msblk->sblk;
++	long long next_block = SQUASHFS_I(i)->start_block +
++				sblk->directory_table_start;
++	int next_offset = SQUASHFS_I(i)->offset, length = 0,
++				dir_count;
++	struct squashfs_dir_header_2 dirh;
++	char buffer[sizeof(struct squashfs_dir_entry_2) + SQUASHFS_NAME_LEN];
++	struct squashfs_dir_entry_2 *dire = (struct squashfs_dir_entry_2 *) buffer;
++	int sorted = sblk->s_major == 2 && sblk->s_minor >= 1;
++
++	TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset);
++
++	if (len > SQUASHFS_NAME_LEN)
++		goto exit_loop;
++
++	length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_start,
++				SQUASHFS_I(i)->u.s2.directory_index_offset,
++				SQUASHFS_I(i)->u.s2.directory_index_count, name,
++				len);
++
++	while (length < i_size_read(i)) {
++		/* read directory header */
++		if (msblk->swap) {
++			struct squashfs_dir_header_2 sdirh;
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
++					next_block, next_offset, sizeof(sdirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(sdirh);
++			SQUASHFS_SWAP_DIR_HEADER_2(&dirh, &sdirh);
++		} else {
++			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
++					next_block, next_offset, sizeof(dirh),
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += sizeof(dirh);
++		}
++
++		dir_count = dirh.count + 1;
++		while (dir_count--) {
++			if (msblk->swap) {
++				struct squashfs_dir_entry_2 sdire;
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						&sdire, next_block,next_offset,
++						sizeof(sdire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(sdire);
++				SQUASHFS_SWAP_DIR_ENTRY_2(dire, &sdire);
++			} else {
++				if (!squashfs_get_cached_block(i->i_sb, (char *)
++						dire, next_block,next_offset,
++						sizeof(*dire), &next_block,
++						&next_offset))
++					goto failed_read;
++
++				length += sizeof(*dire);
++			}
++
++			if (!squashfs_get_cached_block(i->i_sb, dire->name,
++					next_block, next_offset, dire->size + 1,
++					&next_block, &next_offset))
++				goto failed_read;
++
++			length += dire->size + 1;
++
++			if (sorted && name[0] < dire->name[0])
++				goto exit_loop;
++
++			if ((len == dire->size + 1) && !strncmp(name,
++						dire->name, len)) {
++				squashfs_inode_t ino =
++					SQUASHFS_MKINODE(dirh.start_block,
++					dire->offset);
++
++				TRACE("calling squashfs_iget for directory "
++					"entry %s, inode %x:%x, %lld\n", name,
++					dirh.start_block, dire->offset, ino);
++
++				inode = (msblk->iget)(i->i_sb, ino);
++
++				goto exit_loop;
++			}
++		}
++	}
++
++exit_loop:
++	d_add(dentry, inode);
++	return ERR_PTR(0);
++
++failed_read:
++	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
++		next_offset);
++	goto exit_loop;
++}
++
++
++int squashfs_2_0_supported(struct squashfs_sb_info *msblk)
++{
++	struct squashfs_super_block *sblk = &msblk->sblk;
++
++	msblk->iget = squashfs_iget_2;
++	msblk->read_fragment_index_table = read_fragment_index_table_2;
++
++	sblk->bytes_used = sblk->bytes_used_2;
++	sblk->uid_start = sblk->uid_start_2;
++	sblk->guid_start = sblk->guid_start_2;
++	sblk->inode_table_start = sblk->inode_table_start_2;
++	sblk->directory_table_start = sblk->directory_table_start_2;
++	sblk->fragment_table_start = sblk->fragment_table_start_2;
++
++	return 1;
++}
+diff -urN linux-2.6.21.1.old/fs/squashfs/squashfs.h linux-2.6.21.1.dev/fs/squashfs/squashfs.h
+--- linux-2.6.21.1.old/fs/squashfs/squashfs.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/squashfs/squashfs.h	2007-05-26 19:00:37.125351152 +0200
+@@ -0,0 +1,86 @@
++/*
++ * Squashfs - a compressed read only filesystem for Linux
++ *
++ * Copyright (c) 2002, 2003, 2004, 2005, 2006
++ * Phillip Lougher <phillip@lougher.org.uk>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2,
++ * or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * squashfs.h
++ */
++
++#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
++#undef CONFIG_SQUASHFS_1_0_COMPATIBILITY
++#endif
++
++#ifdef SQUASHFS_TRACE
++#define TRACE(s, args...)	printk(KERN_NOTICE "SQUASHFS: "s, ## args)
++#else
++#define TRACE(s, args...)	{}
++#endif
++
++#define ERROR(s, args...)	printk(KERN_ERR "SQUASHFS error: "s, ## args)
++
++#define SERROR(s, args...)	do { \
++				if (!silent) \
++				printk(KERN_ERR "SQUASHFS error: "s, ## args);\
++				} while(0)
++
++#define WARNING(s, args...)	printk(KERN_WARNING "SQUASHFS: "s, ## args)
++
++static inline struct squashfs_inode_info *SQUASHFS_I(struct inode *inode)
++{
++	return list_entry(inode, struct squashfs_inode_info, vfs_inode);
++}
++
++#if defined(CONFIG_SQUASHFS_1_0_COMPATIBILITY) || defined(CONFIG_SQUASHFS_2_0_COMPATIBILITY)
++#define SQSH_EXTERN
++extern unsigned int squashfs_read_data(struct super_block *s, char *buffer,
++				long long index, unsigned int length,
++				long long *next_index);
++extern int squashfs_get_cached_block(struct super_block *s, char *buffer,
++				long long block, unsigned int offset,
++				int length, long long *next_block,
++				unsigned int *next_offset);
++extern void release_cached_fragment(struct squashfs_sb_info *msblk, struct
++					squashfs_fragment_cache *fragment);
++extern struct squashfs_fragment_cache *get_cached_fragment(struct super_block
++					*s, long long start_block,
++					int length);
++extern struct address_space_operations squashfs_symlink_aops;
++extern struct address_space_operations squashfs_aops;
++extern struct address_space_operations squashfs_aops_4K;
++extern struct inode_operations squashfs_dir_inode_ops;
++#else
++#define SQSH_EXTERN static
++#endif
++
++#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
++extern int squashfs_1_0_supported(struct squashfs_sb_info *msblk);
++#else
++static inline int squashfs_1_0_supported(struct squashfs_sb_info *msblk)
++{
++	return 0;
++}
++#endif
++
++#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
++extern int squashfs_2_0_supported(struct squashfs_sb_info *msblk);
++#else
++static inline int squashfs_2_0_supported(struct squashfs_sb_info *msblk)
++{
++	return 0;
++}
++#endif
+diff -urN linux-2.6.21.1.old/include/linux/squashfs_fs.h linux-2.6.21.1.dev/include/linux/squashfs_fs.h
+--- linux-2.6.21.1.old/include/linux/squashfs_fs.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/squashfs_fs.h	2007-05-26 19:00:37.143348416 +0200
+@@ -0,0 +1,911 @@
++#ifndef SQUASHFS_FS
++#define SQUASHFS_FS
++
++/*
++ * Squashfs
++ *
++ * Copyright (c) 2002, 2003, 2004, 2005, 2006
++ * Phillip Lougher <phillip@lougher.org.uk>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2,
++ * or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * squashfs_fs.h
++ */
++
++#ifndef CONFIG_SQUASHFS_2_0_COMPATIBILITY
++#define CONFIG_SQUASHFS_2_0_COMPATIBILITY
++#endif
++
++#ifdef	CONFIG_SQUASHFS_VMALLOC
++#define SQUASHFS_ALLOC(a)		vmalloc(a)
++#define SQUASHFS_FREE(a)		vfree(a)
++#else
++#define SQUASHFS_ALLOC(a)		kmalloc(a, GFP_KERNEL)
++#define SQUASHFS_FREE(a)		kfree(a)
++#endif
++#define SQUASHFS_CACHED_FRAGMENTS	CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE
++#define SQUASHFS_MAJOR			3
++#define SQUASHFS_MINOR			0
++#define SQUASHFS_MAGIC			0x73717368
++#define SQUASHFS_MAGIC_SWAP		0x68737173
++#define SQUASHFS_START			0
++
++/* size of metadata (inode and directory) blocks */
++#define SQUASHFS_METADATA_SIZE		8192
++#define SQUASHFS_METADATA_LOG		13
++
++/* default size of data blocks */
++#define SQUASHFS_FILE_SIZE		65536
++#define SQUASHFS_FILE_LOG		16
++
++#define SQUASHFS_FILE_MAX_SIZE		65536
++
++/* Max number of uids and gids */
++#define SQUASHFS_UIDS			256
++#define SQUASHFS_GUIDS			255
++
++/* Max length of filename (not 255) */
++#define SQUASHFS_NAME_LEN		256
++
++#define SQUASHFS_INVALID		((long long) 0xffffffffffff)
++#define SQUASHFS_INVALID_FRAG		((unsigned int) 0xffffffff)
++#define SQUASHFS_INVALID_BLK		((long long) -1)
++#define SQUASHFS_USED_BLK		((long long) -2)
++
++/* Filesystem flags */
++#define SQUASHFS_NOI			0
++#define SQUASHFS_NOD			1
++#define SQUASHFS_CHECK			2
++#define SQUASHFS_NOF			3
++#define SQUASHFS_NO_FRAG		4
++#define SQUASHFS_ALWAYS_FRAG		5
++#define SQUASHFS_DUPLICATE		6
++
++#define SQUASHFS_BIT(flag, bit)		((flag >> bit) & 1)
++
++#define SQUASHFS_UNCOMPRESSED_INODES(flags)	SQUASHFS_BIT(flags, \
++						SQUASHFS_NOI)
++
++#define SQUASHFS_UNCOMPRESSED_DATA(flags)	SQUASHFS_BIT(flags, \
++						SQUASHFS_NOD)
++
++#define SQUASHFS_UNCOMPRESSED_FRAGMENTS(flags)	SQUASHFS_BIT(flags, \
++						SQUASHFS_NOF)
++
++#define SQUASHFS_NO_FRAGMENTS(flags)		SQUASHFS_BIT(flags, \
++						SQUASHFS_NO_FRAG)
++
++#define SQUASHFS_ALWAYS_FRAGMENTS(flags)	SQUASHFS_BIT(flags, \
++						SQUASHFS_ALWAYS_FRAG)
++
++#define SQUASHFS_DUPLICATES(flags)		SQUASHFS_BIT(flags, \
++						SQUASHFS_DUPLICATE)
++
++#define SQUASHFS_CHECK_DATA(flags)		SQUASHFS_BIT(flags, \
++						SQUASHFS_CHECK)
++
++#define SQUASHFS_MKFLAGS(noi, nod, check_data, nof, no_frag, always_frag, \
++		duplicate_checking)	(noi | (nod << 1) | (check_data << 2) \
++		| (nof << 3) | (no_frag << 4) | (always_frag << 5) | \
++		(duplicate_checking << 6))
++
++/* Max number of types and file types */
++#define SQUASHFS_DIR_TYPE		1
++#define SQUASHFS_FILE_TYPE		2
++#define SQUASHFS_SYMLINK_TYPE		3
++#define SQUASHFS_BLKDEV_TYPE		4
++#define SQUASHFS_CHRDEV_TYPE		5
++#define SQUASHFS_FIFO_TYPE		6
++#define SQUASHFS_SOCKET_TYPE		7
++#define SQUASHFS_LDIR_TYPE		8
++#define SQUASHFS_LREG_TYPE		9
++
++/* 1.0 filesystem type definitions */
++#define SQUASHFS_TYPES			5
++#define SQUASHFS_IPC_TYPE		0
++
++/* Flag whether block is compressed or uncompressed, bit is set if block is
++ * uncompressed */
++#define SQUASHFS_COMPRESSED_BIT		(1 << 15)
++
++#define SQUASHFS_COMPRESSED_SIZE(B)	(((B) & ~SQUASHFS_COMPRESSED_BIT) ? \
++		(B) & ~SQUASHFS_COMPRESSED_BIT :  SQUASHFS_COMPRESSED_BIT)
++
++#define SQUASHFS_COMPRESSED(B)		(!((B) & SQUASHFS_COMPRESSED_BIT))
++
++#define SQUASHFS_COMPRESSED_BIT_BLOCK		(1 << 24)
++
++#define SQUASHFS_COMPRESSED_SIZE_BLOCK(B)	(((B) & \
++	~SQUASHFS_COMPRESSED_BIT_BLOCK) ? (B) & \
++	~SQUASHFS_COMPRESSED_BIT_BLOCK : SQUASHFS_COMPRESSED_BIT_BLOCK)
++
++#define SQUASHFS_COMPRESSED_BLOCK(B)	(!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
++
++/*
++ * Inode number ops.  Inodes consist of a compressed block number, and an
++ * uncompressed  offset within that block
++ */
++#define SQUASHFS_INODE_BLK(a)		((unsigned int) ((a) >> 16))
++
++#define SQUASHFS_INODE_OFFSET(a)	((unsigned int) ((a) & 0xffff))
++
++#define SQUASHFS_MKINODE(A, B)		((squashfs_inode_t)(((squashfs_inode_t) (A)\
++					<< 16) + (B)))
++
++/* Compute 32 bit VFS inode number from squashfs inode number */
++#define SQUASHFS_MK_VFS_INODE(a, b)	((unsigned int) (((a) << 8) + \
++					((b) >> 2) + 1))
++/* XXX */
++
++/* Translate between VFS mode and squashfs mode */
++#define SQUASHFS_MODE(a)		((a) & 0xfff)
++
++/* fragment and fragment table defines */
++#define SQUASHFS_FRAGMENT_BYTES(A)	(A * sizeof(struct squashfs_fragment_entry))
++
++#define SQUASHFS_FRAGMENT_INDEX(A)	(SQUASHFS_FRAGMENT_BYTES(A) / \
++					SQUASHFS_METADATA_SIZE)
++
++#define SQUASHFS_FRAGMENT_INDEX_OFFSET(A)	(SQUASHFS_FRAGMENT_BYTES(A) % \
++						SQUASHFS_METADATA_SIZE)
++
++#define SQUASHFS_FRAGMENT_INDEXES(A)	((SQUASHFS_FRAGMENT_BYTES(A) + \
++					SQUASHFS_METADATA_SIZE - 1) / \
++					SQUASHFS_METADATA_SIZE)
++
++#define SQUASHFS_FRAGMENT_INDEX_BYTES(A)	(SQUASHFS_FRAGMENT_INDEXES(A) *\
++						sizeof(long long))
++
++/* cached data constants for filesystem */
++#define SQUASHFS_CACHED_BLKS		8
++
++#define SQUASHFS_MAX_FILE_SIZE_LOG	64
++
++#define SQUASHFS_MAX_FILE_SIZE		((long long) 1 << \
++					(SQUASHFS_MAX_FILE_SIZE_LOG - 2))
++
++#define SQUASHFS_MARKER_BYTE		0xff
++
++/* meta index cache */
++#define SQUASHFS_META_INDEXES	(SQUASHFS_METADATA_SIZE / sizeof(unsigned int))
++#define SQUASHFS_META_ENTRIES	31
++#define SQUASHFS_META_NUMBER	8
++#define SQUASHFS_SLOTS		4
++
++struct meta_entry {
++	long long		data_block;
++	unsigned int		index_block;
++	unsigned short		offset;
++	unsigned short		pad;
++};
++
++struct meta_index {
++	unsigned int		inode_number;
++	unsigned int		offset;
++	unsigned short		entries;
++	unsigned short		skip;
++	unsigned short		locked;
++	unsigned short		pad;
++	struct meta_entry	meta_entry[SQUASHFS_META_ENTRIES];
++};
++
++
++/*
++ * definitions for structures on disk
++ */
++
++typedef long long		squashfs_block_t;
++typedef long long		squashfs_inode_t;
++
++struct squashfs_super_block {
++	unsigned int		s_magic;
++	unsigned int		inodes;
++	unsigned int		bytes_used_2;
++	unsigned int		uid_start_2;
++	unsigned int		guid_start_2;
++	unsigned int		inode_table_start_2;
++	unsigned int		directory_table_start_2;
++	unsigned int		s_major:16;
++	unsigned int		s_minor:16;
++	unsigned int		block_size_1:16;
++	unsigned int		block_log:16;
++	unsigned int		flags:8;
++	unsigned int		no_uids:8;
++	unsigned int		no_guids:8;
++	unsigned int		mkfs_time /* time of filesystem creation */;
++	squashfs_inode_t	root_inode;
++	unsigned int		block_size;
++	unsigned int		fragments;
++	unsigned int		fragment_table_start_2;
++	long long		bytes_used;
++	long long		uid_start;
++	long long		guid_start;
++	long long		inode_table_start;
++	long long		directory_table_start;
++	long long		fragment_table_start;
++	long long		unused;
++} __attribute__ ((packed));
++
++struct squashfs_dir_index {
++	unsigned int		index;
++	unsigned int		start_block;
++	unsigned char		size;
++	unsigned char		name[0];
++} __attribute__ ((packed));
++
++#define SQUASHFS_BASE_INODE_HEADER		\
++	unsigned int		inode_type:4;	\
++	unsigned int		mode:12;	\
++	unsigned int		uid:8;		\
++	unsigned int		guid:8;		\
++	unsigned int		mtime;		\
++	unsigned int 		inode_number;
++
++struct squashfs_base_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++} __attribute__ ((packed));
++
++struct squashfs_ipc_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++	unsigned int		nlink;
++} __attribute__ ((packed));
++
++struct squashfs_dev_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++	unsigned int		nlink;
++	unsigned short		rdev;
++} __attribute__ ((packed));
++
++struct squashfs_symlink_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++	unsigned int		nlink;
++	unsigned short		symlink_size;
++	char			symlink[0];
++} __attribute__ ((packed));
++
++struct squashfs_reg_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++	squashfs_block_t	start_block;
++	unsigned int		fragment;
++	unsigned int		offset;
++	unsigned int		file_size;
++	unsigned short		block_list[0];
++} __attribute__ ((packed));
++
++struct squashfs_lreg_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++	unsigned int		nlink;
++	squashfs_block_t	start_block;
++	unsigned int		fragment;
++	unsigned int		offset;
++	long long		file_size;
++	unsigned short		block_list[0];
++} __attribute__ ((packed));
++
++struct squashfs_dir_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++	unsigned int		nlink;
++	unsigned int		file_size:19;
++	unsigned int		offset:13;
++	unsigned int		start_block;
++	unsigned int		parent_inode;
++} __attribute__  ((packed));
++
++struct squashfs_ldir_inode_header {
++	SQUASHFS_BASE_INODE_HEADER;
++	unsigned int		nlink;
++	unsigned int		file_size:27;
++	unsigned int		offset:13;
++	unsigned int		start_block;
++	unsigned int		i_count:16;
++	unsigned int		parent_inode;
++	struct squashfs_dir_index	index[0];
++} __attribute__  ((packed));
++
++union squashfs_inode_header {
++	struct squashfs_base_inode_header	base;
++	struct squashfs_dev_inode_header	dev;
++	struct squashfs_symlink_inode_header	symlink;
++	struct squashfs_reg_inode_header	reg;
++	struct squashfs_lreg_inode_header	lreg;
++	struct squashfs_dir_inode_header	dir;
++	struct squashfs_ldir_inode_header	ldir;
++	struct squashfs_ipc_inode_header	ipc;
++};
++
++struct squashfs_dir_entry {
++	unsigned int		offset:13;
++	unsigned int		type:3;
++	unsigned int		size:8;
++	int			inode_number:16;
++	char			name[0];
++} __attribute__ ((packed));
++
++struct squashfs_dir_header {
++	unsigned int		count:8;
++	unsigned int		start_block;
++	unsigned int		inode_number;
++} __attribute__ ((packed));
++
++struct squashfs_fragment_entry {
++	long long		start_block;
++	unsigned int		size;
++	unsigned int		unused;
++} __attribute__ ((packed));
++
++extern int squashfs_uncompress_block(void *d, int dstlen, void *s, int srclen);
++extern int squashfs_uncompress_init(void);
++extern int squashfs_uncompress_exit(void);
++
++/*
++ * macros to convert each packed bitfield structure from little endian to big
++ * endian and vice versa.  These are needed when creating or using a filesystem
++ * on a machine with different byte ordering to the target architecture.
++ *
++ */
++
++#define SQUASHFS_SWAP_START \
++	int bits;\
++	int b_pos;\
++	unsigned long long val;\
++	unsigned char *s;\
++	unsigned char *d;
++
++#define SQUASHFS_SWAP_SUPER_BLOCK(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_super_block));\
++	SQUASHFS_SWAP((s)->s_magic, d, 0, 32);\
++	SQUASHFS_SWAP((s)->inodes, d, 32, 32);\
++	SQUASHFS_SWAP((s)->bytes_used_2, d, 64, 32);\
++	SQUASHFS_SWAP((s)->uid_start_2, d, 96, 32);\
++	SQUASHFS_SWAP((s)->guid_start_2, d, 128, 32);\
++	SQUASHFS_SWAP((s)->inode_table_start_2, d, 160, 32);\
++	SQUASHFS_SWAP((s)->directory_table_start_2, d, 192, 32);\
++	SQUASHFS_SWAP((s)->s_major, d, 224, 16);\
++	SQUASHFS_SWAP((s)->s_minor, d, 240, 16);\
++	SQUASHFS_SWAP((s)->block_size_1, d, 256, 16);\
++	SQUASHFS_SWAP((s)->block_log, d, 272, 16);\
++	SQUASHFS_SWAP((s)->flags, d, 288, 8);\
++	SQUASHFS_SWAP((s)->no_uids, d, 296, 8);\
++	SQUASHFS_SWAP((s)->no_guids, d, 304, 8);\
++	SQUASHFS_SWAP((s)->mkfs_time, d, 312, 32);\
++	SQUASHFS_SWAP((s)->root_inode, d, 344, 64);\
++	SQUASHFS_SWAP((s)->block_size, d, 408, 32);\
++	SQUASHFS_SWAP((s)->fragments, d, 440, 32);\
++	SQUASHFS_SWAP((s)->fragment_table_start_2, d, 472, 32);\
++	SQUASHFS_SWAP((s)->bytes_used, d, 504, 64);\
++	SQUASHFS_SWAP((s)->uid_start, d, 568, 64);\
++	SQUASHFS_SWAP((s)->guid_start, d, 632, 64);\
++	SQUASHFS_SWAP((s)->inode_table_start, d, 696, 64);\
++	SQUASHFS_SWAP((s)->directory_table_start, d, 760, 64);\
++	SQUASHFS_SWAP((s)->fragment_table_start, d, 824, 64);\
++	SQUASHFS_SWAP((s)->unused, d, 888, 64);\
++}
++
++#define SQUASHFS_SWAP_BASE_INODE_CORE(s, d, n)\
++	SQUASHFS_MEMSET(s, d, n);\
++	SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
++	SQUASHFS_SWAP((s)->mode, d, 4, 12);\
++	SQUASHFS_SWAP((s)->uid, d, 16, 8);\
++	SQUASHFS_SWAP((s)->guid, d, 24, 8);\
++	SQUASHFS_SWAP((s)->mtime, d, 32, 32);\
++	SQUASHFS_SWAP((s)->inode_number, d, 64, 32);
++
++#define SQUASHFS_SWAP_BASE_INODE_HEADER(s, d, n) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, n)\
++}
++
++#define SQUASHFS_SWAP_IPC_INODE_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
++			sizeof(struct squashfs_ipc_inode_header))\
++	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
++}
++
++#define SQUASHFS_SWAP_DEV_INODE_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
++			sizeof(struct squashfs_dev_inode_header)); \
++	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
++	SQUASHFS_SWAP((s)->rdev, d, 128, 16);\
++}
++
++#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
++			sizeof(struct squashfs_symlink_inode_header));\
++	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
++	SQUASHFS_SWAP((s)->symlink_size, d, 128, 16);\
++}
++
++#define SQUASHFS_SWAP_REG_INODE_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
++			sizeof(struct squashfs_reg_inode_header));\
++	SQUASHFS_SWAP((s)->start_block, d, 96, 64);\
++	SQUASHFS_SWAP((s)->fragment, d, 160, 32);\
++	SQUASHFS_SWAP((s)->offset, d, 192, 32);\
++	SQUASHFS_SWAP((s)->file_size, d, 224, 32);\
++}
++
++#define SQUASHFS_SWAP_LREG_INODE_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
++			sizeof(struct squashfs_lreg_inode_header));\
++	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
++	SQUASHFS_SWAP((s)->start_block, d, 128, 64);\
++	SQUASHFS_SWAP((s)->fragment, d, 192, 32);\
++	SQUASHFS_SWAP((s)->offset, d, 224, 32);\
++	SQUASHFS_SWAP((s)->file_size, d, 256, 64);\
++}
++
++#define SQUASHFS_SWAP_DIR_INODE_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
++			sizeof(struct squashfs_dir_inode_header));\
++	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
++	SQUASHFS_SWAP((s)->file_size, d, 128, 19);\
++	SQUASHFS_SWAP((s)->offset, d, 147, 13);\
++	SQUASHFS_SWAP((s)->start_block, d, 160, 32);\
++	SQUASHFS_SWAP((s)->parent_inode, d, 192, 32);\
++}
++
++#define SQUASHFS_SWAP_LDIR_INODE_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
++			sizeof(struct squashfs_ldir_inode_header));\
++	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
++	SQUASHFS_SWAP((s)->file_size, d, 128, 27);\
++	SQUASHFS_SWAP((s)->offset, d, 155, 13);\
++	SQUASHFS_SWAP((s)->start_block, d, 168, 32);\
++	SQUASHFS_SWAP((s)->i_count, d, 200, 16);\
++	SQUASHFS_SWAP((s)->parent_inode, d, 216, 32);\
++}
++
++#define SQUASHFS_SWAP_DIR_INDEX(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_index));\
++	SQUASHFS_SWAP((s)->index, d, 0, 32);\
++	SQUASHFS_SWAP((s)->start_block, d, 32, 32);\
++	SQUASHFS_SWAP((s)->size, d, 64, 8);\
++}
++
++#define SQUASHFS_SWAP_DIR_HEADER(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_header));\
++	SQUASHFS_SWAP((s)->count, d, 0, 8);\
++	SQUASHFS_SWAP((s)->start_block, d, 8, 32);\
++	SQUASHFS_SWAP((s)->inode_number, d, 40, 32);\
++}
++
++#define SQUASHFS_SWAP_DIR_ENTRY(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_entry));\
++	SQUASHFS_SWAP((s)->offset, d, 0, 13);\
++	SQUASHFS_SWAP((s)->type, d, 13, 3);\
++	SQUASHFS_SWAP((s)->size, d, 16, 8);\
++	SQUASHFS_SWAP((s)->inode_number, d, 24, 16);\
++}
++
++#define SQUASHFS_SWAP_FRAGMENT_ENTRY(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_fragment_entry));\
++	SQUASHFS_SWAP((s)->start_block, d, 0, 64);\
++	SQUASHFS_SWAP((s)->size, d, 64, 32);\
++}
++
++#define SQUASHFS_SWAP_SHORTS(s, d, n) {\
++	int entry;\
++	int bit_position;\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, n * 2);\
++	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
++			16)\
++		SQUASHFS_SWAP(s[entry], d, bit_position, 16);\
++}
++
++#define SQUASHFS_SWAP_INTS(s, d, n) {\
++	int entry;\
++	int bit_position;\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, n * 4);\
++	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
++			32)\
++		SQUASHFS_SWAP(s[entry], d, bit_position, 32);\
++}
++
++#define SQUASHFS_SWAP_LONG_LONGS(s, d, n) {\
++	int entry;\
++	int bit_position;\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, n * 8);\
++	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
++			64)\
++		SQUASHFS_SWAP(s[entry], d, bit_position, 64);\
++}
++
++#define SQUASHFS_SWAP_DATA(s, d, n, bits) {\
++	int entry;\
++	int bit_position;\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, n * bits / 8);\
++	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
++			bits)\
++		SQUASHFS_SWAP(s[entry], d, bit_position, bits);\
++}
++
++#define SQUASHFS_SWAP_FRAGMENT_INDEXES(s, d, n) SQUASHFS_SWAP_LONG_LONGS(s, d, n)
++
++#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
++
++struct squashfs_base_inode_header_1 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:4; /* index into uid table */
++	unsigned int		guid:4; /* index into guid table */
++} __attribute__ ((packed));
++
++struct squashfs_ipc_inode_header_1 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:4; /* index into uid table */
++	unsigned int		guid:4; /* index into guid table */
++	unsigned int		type:4;
++	unsigned int		offset:4;
++} __attribute__ ((packed));
++
++struct squashfs_dev_inode_header_1 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:4; /* index into uid table */
++	unsigned int		guid:4; /* index into guid table */
++	unsigned short		rdev;
++} __attribute__ ((packed));
++
++struct squashfs_symlink_inode_header_1 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:4; /* index into uid table */
++	unsigned int		guid:4; /* index into guid table */
++	unsigned short		symlink_size;
++	char			symlink[0];
++} __attribute__ ((packed));
++
++struct squashfs_reg_inode_header_1 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:4; /* index into uid table */
++	unsigned int		guid:4; /* index into guid table */
++	unsigned int		mtime;
++	unsigned int		start_block;
++	unsigned int		file_size:32;
++	unsigned short		block_list[0];
++} __attribute__ ((packed));
++
++struct squashfs_dir_inode_header_1 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:4; /* index into uid table */
++	unsigned int		guid:4; /* index into guid table */
++	unsigned int		file_size:19;
++	unsigned int		offset:13;
++	unsigned int		mtime;
++	unsigned int		start_block:24;
++} __attribute__  ((packed));
++
++#define SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, n) \
++	SQUASHFS_MEMSET(s, d, n);\
++	SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
++	SQUASHFS_SWAP((s)->mode, d, 4, 12);\
++	SQUASHFS_SWAP((s)->uid, d, 16, 4);\
++	SQUASHFS_SWAP((s)->guid, d, 20, 4);
++
++#define SQUASHFS_SWAP_BASE_INODE_HEADER_1(s, d, n) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, n)\
++}
++
++#define SQUASHFS_SWAP_IPC_INODE_HEADER_1(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
++			sizeof(struct squashfs_ipc_inode_header_1));\
++	SQUASHFS_SWAP((s)->type, d, 24, 4);\
++	SQUASHFS_SWAP((s)->offset, d, 28, 4);\
++}
++
++#define SQUASHFS_SWAP_DEV_INODE_HEADER_1(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
++			sizeof(struct squashfs_dev_inode_header_1));\
++	SQUASHFS_SWAP((s)->rdev, d, 24, 16);\
++}
++
++#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER_1(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
++			sizeof(struct squashfs_symlink_inode_header_1));\
++	SQUASHFS_SWAP((s)->symlink_size, d, 24, 16);\
++}
++
++#define SQUASHFS_SWAP_REG_INODE_HEADER_1(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
++			sizeof(struct squashfs_reg_inode_header_1));\
++	SQUASHFS_SWAP((s)->mtime, d, 24, 32);\
++	SQUASHFS_SWAP((s)->start_block, d, 56, 32);\
++	SQUASHFS_SWAP((s)->file_size, d, 88, 32);\
++}
++
++#define SQUASHFS_SWAP_DIR_INODE_HEADER_1(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
++			sizeof(struct squashfs_dir_inode_header_1));\
++	SQUASHFS_SWAP((s)->file_size, d, 24, 19);\
++	SQUASHFS_SWAP((s)->offset, d, 43, 13);\
++	SQUASHFS_SWAP((s)->mtime, d, 56, 32);\
++	SQUASHFS_SWAP((s)->start_block, d, 88, 24);\
++}
++
++#endif
++
++#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
++
++struct squashfs_dir_index_2 {
++	unsigned int		index:27;
++	unsigned int		start_block:29;
++	unsigned char		size;
++	unsigned char		name[0];
++} __attribute__ ((packed));
++
++struct squashfs_base_inode_header_2 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:8; /* index into uid table */
++	unsigned int		guid:8; /* index into guid table */
++} __attribute__ ((packed));
++
++struct squashfs_ipc_inode_header_2 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:8; /* index into uid table */
++	unsigned int		guid:8; /* index into guid table */
++} __attribute__ ((packed));
++
++struct squashfs_dev_inode_header_2 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:8; /* index into uid table */
++	unsigned int		guid:8; /* index into guid table */
++	unsigned short		rdev;
++} __attribute__ ((packed));
++
++struct squashfs_symlink_inode_header_2 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:8; /* index into uid table */
++	unsigned int		guid:8; /* index into guid table */
++	unsigned short		symlink_size;
++	char			symlink[0];
++} __attribute__ ((packed));
++
++struct squashfs_reg_inode_header_2 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:8; /* index into uid table */
++	unsigned int		guid:8; /* index into guid table */
++	unsigned int		mtime;
++	unsigned int		start_block;
++	unsigned int		fragment;
++	unsigned int		offset;
++	unsigned int		file_size:32;
++	unsigned short		block_list[0];
++} __attribute__ ((packed));
++
++struct squashfs_dir_inode_header_2 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:8; /* index into uid table */
++	unsigned int		guid:8; /* index into guid table */
++	unsigned int		file_size:19;
++	unsigned int		offset:13;
++	unsigned int		mtime;
++	unsigned int		start_block:24;
++} __attribute__  ((packed));
++
++struct squashfs_ldir_inode_header_2 {
++	unsigned int		inode_type:4;
++	unsigned int		mode:12; /* protection */
++	unsigned int		uid:8; /* index into uid table */
++	unsigned int		guid:8; /* index into guid table */
++	unsigned int		file_size:27;
++	unsigned int		offset:13;
++	unsigned int		mtime;
++	unsigned int		start_block:24;
++	unsigned int		i_count:16;
++	struct squashfs_dir_index_2	index[0];
++} __attribute__  ((packed));
++
++union squashfs_inode_header_2 {
++	struct squashfs_base_inode_header_2	base;
++	struct squashfs_dev_inode_header_2	dev;
++	struct squashfs_symlink_inode_header_2	symlink;
++	struct squashfs_reg_inode_header_2	reg;
++	struct squashfs_dir_inode_header_2	dir;
++	struct squashfs_ldir_inode_header_2	ldir;
++	struct squashfs_ipc_inode_header_2	ipc;
++};
++
++struct squashfs_dir_header_2 {
++	unsigned int		count:8;
++	unsigned int		start_block:24;
++} __attribute__ ((packed));
++
++struct squashfs_dir_entry_2 {
++	unsigned int		offset:13;
++	unsigned int		type:3;
++	unsigned int		size:8;
++	char			name[0];
++} __attribute__ ((packed));
++
++struct squashfs_fragment_entry_2 {
++	unsigned int		start_block;
++	unsigned int		size;
++} __attribute__ ((packed));
++
++#define SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, n)\
++	SQUASHFS_MEMSET(s, d, n);\
++	SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
++	SQUASHFS_SWAP((s)->mode, d, 4, 12);\
++	SQUASHFS_SWAP((s)->uid, d, 16, 8);\
++	SQUASHFS_SWAP((s)->guid, d, 24, 8);\
++
++#define SQUASHFS_SWAP_BASE_INODE_HEADER_2(s, d, n) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, n)\
++}
++
++#define SQUASHFS_SWAP_IPC_INODE_HEADER_2(s, d) \
++	SQUASHFS_SWAP_BASE_INODE_HEADER_2(s, d, sizeof(struct squashfs_ipc_inode_header_2))
++
++#define SQUASHFS_SWAP_DEV_INODE_HEADER_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
++			sizeof(struct squashfs_dev_inode_header_2)); \
++	SQUASHFS_SWAP((s)->rdev, d, 32, 16);\
++}
++
++#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
++			sizeof(struct squashfs_symlink_inode_header_2));\
++	SQUASHFS_SWAP((s)->symlink_size, d, 32, 16);\
++}
++
++#define SQUASHFS_SWAP_REG_INODE_HEADER_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
++			sizeof(struct squashfs_reg_inode_header_2));\
++	SQUASHFS_SWAP((s)->mtime, d, 32, 32);\
++	SQUASHFS_SWAP((s)->start_block, d, 64, 32);\
++	SQUASHFS_SWAP((s)->fragment, d, 96, 32);\
++	SQUASHFS_SWAP((s)->offset, d, 128, 32);\
++	SQUASHFS_SWAP((s)->file_size, d, 160, 32);\
++}
++
++#define SQUASHFS_SWAP_DIR_INODE_HEADER_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
++			sizeof(struct squashfs_dir_inode_header_2));\
++	SQUASHFS_SWAP((s)->file_size, d, 32, 19);\
++	SQUASHFS_SWAP((s)->offset, d, 51, 13);\
++	SQUASHFS_SWAP((s)->mtime, d, 64, 32);\
++	SQUASHFS_SWAP((s)->start_block, d, 96, 24);\
++}
++
++#define SQUASHFS_SWAP_LDIR_INODE_HEADER_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
++			sizeof(struct squashfs_ldir_inode_header_2));\
++	SQUASHFS_SWAP((s)->file_size, d, 32, 27);\
++	SQUASHFS_SWAP((s)->offset, d, 59, 13);\
++	SQUASHFS_SWAP((s)->mtime, d, 72, 32);\
++	SQUASHFS_SWAP((s)->start_block, d, 104, 24);\
++	SQUASHFS_SWAP((s)->i_count, d, 128, 16);\
++}
++
++#define SQUASHFS_SWAP_DIR_INDEX_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_index_2));\
++	SQUASHFS_SWAP((s)->index, d, 0, 27);\
++	SQUASHFS_SWAP((s)->start_block, d, 27, 29);\
++	SQUASHFS_SWAP((s)->size, d, 56, 8);\
++}
++#define SQUASHFS_SWAP_DIR_HEADER_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_header_2));\
++	SQUASHFS_SWAP((s)->count, d, 0, 8);\
++	SQUASHFS_SWAP((s)->start_block, d, 8, 24);\
++}
++
++#define SQUASHFS_SWAP_DIR_ENTRY_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_entry_2));\
++	SQUASHFS_SWAP((s)->offset, d, 0, 13);\
++	SQUASHFS_SWAP((s)->type, d, 13, 3);\
++	SQUASHFS_SWAP((s)->size, d, 16, 8);\
++}
++
++#define SQUASHFS_SWAP_FRAGMENT_ENTRY_2(s, d) {\
++	SQUASHFS_SWAP_START\
++	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_fragment_entry_2));\
++	SQUASHFS_SWAP((s)->start_block, d, 0, 32);\
++	SQUASHFS_SWAP((s)->size, d, 32, 32);\
++}
++
++#define SQUASHFS_SWAP_FRAGMENT_INDEXES_2(s, d, n) SQUASHFS_SWAP_INTS(s, d, n)
++
++/* fragment and fragment table defines */
++#define SQUASHFS_FRAGMENT_BYTES_2(A)	(A * sizeof(struct squashfs_fragment_entry_2))
++
++#define SQUASHFS_FRAGMENT_INDEX_2(A)	(SQUASHFS_FRAGMENT_BYTES_2(A) / \
++					SQUASHFS_METADATA_SIZE)
++
++#define SQUASHFS_FRAGMENT_INDEX_OFFSET_2(A)	(SQUASHFS_FRAGMENT_BYTES_2(A) % \
++						SQUASHFS_METADATA_SIZE)
++
++#define SQUASHFS_FRAGMENT_INDEXES_2(A)	((SQUASHFS_FRAGMENT_BYTES_2(A) + \
++					SQUASHFS_METADATA_SIZE - 1) / \
++					SQUASHFS_METADATA_SIZE)
++
++#define SQUASHFS_FRAGMENT_INDEX_BYTES_2(A)	(SQUASHFS_FRAGMENT_INDEXES_2(A) *\
++						sizeof(int))
++
++#endif
++
++#ifdef __KERNEL__
++
++/*
++ * macros used to swap each structure entry, taking into account
++ * bitfields and different bitfield placing conventions on differing
++ * architectures
++ */
++
++#include <asm/byteorder.h>
++
++#ifdef __BIG_ENDIAN
++	/* convert from little endian to big endian */
++#define SQUASHFS_SWAP(value, p, pos, tbits) _SQUASHFS_SWAP(value, p, pos, \
++		tbits, b_pos)
++#else
++	/* convert from big endian to little endian */
++#define SQUASHFS_SWAP(value, p, pos, tbits) _SQUASHFS_SWAP(value, p, pos, \
++		tbits, 64 - tbits - b_pos)
++#endif
++
++#define _SQUASHFS_SWAP(value, p, pos, tbits, SHIFT) {\
++	b_pos = pos % 8;\
++	val = 0;\
++	s = (unsigned char *)p + (pos / 8);\
++	d = ((unsigned char *) &val) + 7;\
++	for(bits = 0; bits < (tbits + b_pos); bits += 8) \
++		*d-- = *s++;\
++	value = (val >> (SHIFT))/* & ((1 << tbits) - 1)*/;\
++}
++
++#define SQUASHFS_MEMSET(s, d, n)	memset(s, 0, n);
++
++#endif
++#endif
+diff -urN linux-2.6.21.1.old/include/linux/squashfs_fs_i.h linux-2.6.21.1.dev/include/linux/squashfs_fs_i.h
+--- linux-2.6.21.1.old/include/linux/squashfs_fs_i.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/squashfs_fs_i.h	2007-05-26 19:00:37.143348416 +0200
+@@ -0,0 +1,45 @@
++#ifndef SQUASHFS_FS_I
++#define SQUASHFS_FS_I
++/*
++ * Squashfs
++ *
++ * Copyright (c) 2002, 2003, 2004, 2005, 2006
++ * Phillip Lougher <phillip@lougher.org.uk>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2,
++ * or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * squashfs_fs_i.h
++ */
++
++struct squashfs_inode_info {
++	long long	start_block;
++	unsigned int	offset;
++	union {
++		struct {
++			long long	fragment_start_block;
++			unsigned int	fragment_size;
++			unsigned int	fragment_offset;
++			long long	block_list_start;
++		} s1;
++		struct {
++			long long	directory_index_start;
++			unsigned int	directory_index_offset;
++			unsigned int	directory_index_count;
++			unsigned int	parent_inode;
++		} s2;
++	} u;
++	struct inode	vfs_inode;
++};
++#endif
+diff -urN linux-2.6.21.1.old/include/linux/squashfs_fs_sb.h linux-2.6.21.1.dev/include/linux/squashfs_fs_sb.h
+--- linux-2.6.21.1.old/include/linux/squashfs_fs_sb.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/squashfs_fs_sb.h	2007-05-26 19:00:37.144348264 +0200
+@@ -0,0 +1,74 @@
++#ifndef SQUASHFS_FS_SB
++#define SQUASHFS_FS_SB
++/*
++ * Squashfs
++ *
++ * Copyright (c) 2002, 2003, 2004, 2005, 2006
++ * Phillip Lougher <phillip@lougher.org.uk>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2,
++ * or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * squashfs_fs_sb.h
++ */
++
++#include <linux/squashfs_fs.h>
++
++struct squashfs_cache {
++	long long	block;
++	int		length;
++	long long	next_index;
++	char		*data;
++};
++
++struct squashfs_fragment_cache {
++	long long	block;
++	int		length;
++	unsigned int	locked;
++	char		*data;
++};
++
++struct squashfs_sb_info {
++	struct squashfs_super_block	sblk;
++	int			devblksize;
++	int			devblksize_log2;
++	int			swap;
++	struct squashfs_cache	*block_cache;
++	struct squashfs_fragment_cache	*fragment;
++	int			next_cache;
++	int			next_fragment;
++	int			next_meta_index;
++	unsigned int		*uid;
++	unsigned int		*guid;
++	long long		*fragment_index;
++	unsigned int		*fragment_index_2;
++	unsigned int		read_size;
++	char			*read_data;
++	char			*read_page;
++	struct semaphore	read_data_mutex;
++	struct semaphore	read_page_mutex;
++	struct semaphore	block_cache_mutex;
++	struct semaphore	fragment_mutex;
++	struct semaphore	meta_index_mutex;
++	wait_queue_head_t	waitq;
++	wait_queue_head_t	fragment_wait_queue;
++	struct meta_index	*meta_index;
++	struct inode		*(*iget)(struct super_block *s,  squashfs_inode_t \
++				inode);
++	long long		(*read_blocklist)(struct inode *inode, int \
++				index, int readahead_blks, char *block_list, \
++				unsigned short **block_p, unsigned int *bsize);
++	int			(*read_fragment_index_table)(struct super_block *s);
++};
++#endif
+diff -urN linux-2.6.21.1.old/init/do_mounts_rd.c linux-2.6.21.1.dev/init/do_mounts_rd.c
+--- linux-2.6.21.1.old/init/do_mounts_rd.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/init/do_mounts_rd.c	2007-05-26 19:00:37.144348264 +0200
+@@ -5,6 +5,7 @@
+ #include <linux/ext2_fs.h>
+ #include <linux/romfs_fs.h>
+ #include <linux/cramfs_fs.h>
++#include <linux/squashfs_fs.h>
+ #include <linux/initrd.h>
+ #include <linux/string.h>
+ 
+@@ -39,6 +40,7 @@
+  * numbers could not be found.
+  *
+  * We currently check for the following magic numbers:
++ *      squashfs
+  * 	minix
+  * 	ext2
+  *	romfs
+@@ -53,6 +55,7 @@
+ 	struct ext2_super_block *ext2sb;
+ 	struct romfs_super_block *romfsb;
+ 	struct cramfs_super *cramfsb;
++	struct squashfs_super_block *squashfsb;
+ 	int nblocks = -1;
+ 	unsigned char *buf;
+ 
+@@ -64,6 +67,7 @@
+ 	ext2sb = (struct ext2_super_block *) buf;
+ 	romfsb = (struct romfs_super_block *) buf;
+ 	cramfsb = (struct cramfs_super *) buf;
++	squashfsb = (struct squashfs_super_block *) buf;
+ 	memset(buf, 0xe5, size);
+ 
+ 	/*
+@@ -101,6 +105,15 @@
+ 		goto done;
+ 	}
+ 
++	/* squashfs is at block zero too */
++	if (squashfsb->s_magic == SQUASHFS_MAGIC) {
++		printk(KERN_NOTICE
++		       "RAMDISK: squashfs filesystem found at block %d\n",
++		       start_block);
++		nblocks = (squashfsb->bytes_used+BLOCK_SIZE-1)>>BLOCK_SIZE_BITS;
++		goto done;
++	}
++
+ 	/*
+ 	 * Read block 1 to test for minix and ext2 superblock
+ 	 */
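
For reference, the block-zero check that the do_mounts_rd.c hunk performs can be reproduced from userspace. The sketch below is not part of the patch set: it assumes a native little-endian squashfs 3.0 image read on a little-endian host, the usual SQUASHFS_MAGIC value 0x73717368 from squashfs_fs.h, and the bytes_used byte offset of 63 implied by the SQUASHFS_SWAP_SUPER_BLOCK bit positions (bit 504 / 8). It applies the same rounding to 1024-byte ramdisk blocks as the kernel hunk.

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	int main(int argc, char **argv)
	{
		unsigned char sb[72];
		uint32_t magic;
		uint64_t bytes_used;
		FILE *f;

		if (argc < 2 || !(f = fopen(argv[1], "rb")))
			return 1;
		if (fread(sb, 1, sizeof(sb), f) != sizeof(sb)) {
			fclose(f);
			return 1;
		}
		fclose(f);

		memcpy(&magic, sb, 4);           /* s_magic is the first superblock field */
		memcpy(&bytes_used, sb + 63, 8); /* bytes_used sits at bit 504, i.e. byte 63 */

		if (magic != 0x73717368) {       /* assumed SQUASHFS_MAGIC, "hsqs" on disk */
			fprintf(stderr, "no native-endian squashfs superblock found\n");
			return 1;
		}
		/* same rounding as the do_mounts_rd.c hunk, 1024-byte ramdisk blocks */
		printf("squashfs image: %llu ramdisk blocks\n",
		       (unsigned long long)((bytes_used + 1023) >> 10));
		return 0;
	}
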
diff --git a/target/linux/generic-2.6/patches-2.6.22/002-lzma_decompress.patch b/target/linux/generic-2.6/patches-2.6.22/002-lzma_decompress.patch
new file mode 100644
index 0000000000..d141b6347b
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/002-lzma_decompress.patch
@@ -0,0 +1,791 @@
+diff -urN linux-2.6.21.1.old/include/linux/LzmaDecode.h linux-2.6.21.1.dev/include/linux/LzmaDecode.h
+--- linux-2.6.21.1.old/include/linux/LzmaDecode.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/LzmaDecode.h	2007-05-26 19:03:45.705682584 +0200
+@@ -0,0 +1,100 @@
++/*
++  LzmaDecode.h
++  LZMA Decoder interface
++
++  LZMA SDK 4.05 Copyright (c) 1999-2004 Igor Pavlov (2004-08-25)
++  http://www.7-zip.org/
++
++  LZMA SDK is licensed under two licenses:
++  1) GNU Lesser General Public License (GNU LGPL)
++  2) Common Public License (CPL)
++  It means that you can select one of these two licenses and
++  follow rules of that license.
++
++  SPECIAL EXCEPTION:
++  Igor Pavlov, as the author of this code, expressly permits you to
++  statically or dynamically link your code (or bind by name) to the
++  interfaces of this file without subjecting your linked code to the
++  terms of the CPL or GNU LGPL. Any modifications or additions
++  to this file, however, are subject to the LGPL or CPL terms.
++*/
++
++#ifndef __LZMADECODE_H
++#define __LZMADECODE_H
++
++/* #define _LZMA_IN_CB */
++/* Use callback for input data */
++
++/* #define _LZMA_OUT_READ */
++/* Use read function for output data */
++
++/* #define _LZMA_PROB32 */
++/* It can increase speed on some 32-bit CPUs,
++   but memory usage will be doubled in that case */
++
++/* #define _LZMA_LOC_OPT */
++/* Enable local speed optimizations inside code */
++
++#ifndef UInt32
++#ifdef _LZMA_UINT32_IS_ULONG
++#define UInt32 unsigned long
++#else
++#define UInt32 unsigned int
++#endif
++#endif
++
++#ifdef _LZMA_PROB32
++#define CProb UInt32
++#else
++#define CProb unsigned short
++#endif
++
++#define LZMA_RESULT_OK 0
++#define LZMA_RESULT_DATA_ERROR 1
++#define LZMA_RESULT_NOT_ENOUGH_MEM 2
++
++#ifdef _LZMA_IN_CB
++typedef struct _ILzmaInCallback
++{
++  int (*Read)(void *object, unsigned char **buffer, UInt32 *bufferSize);
++} ILzmaInCallback;
++#endif
++
++#define LZMA_BASE_SIZE 1846
++#define LZMA_LIT_SIZE 768
++
++/*
++bufferSize = (LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)))* sizeof(CProb)
++bufferSize += 100 in case of _LZMA_OUT_READ
++by default CProb is unsigned short,
++but if _LZMA_PROB32 is specified, CProb will be UInt32 (unsigned int)
++*/
++
++#ifdef _LZMA_OUT_READ
++int LzmaDecoderInit(
++    unsigned char *buffer, UInt32 bufferSize,
++    int lc, int lp, int pb,
++    unsigned char *dictionary, UInt32 dictionarySize,
++  #ifdef _LZMA_IN_CB
++    ILzmaInCallback *inCallback
++  #else
++    unsigned char *inStream, UInt32 inSize
++  #endif
++);
++#endif
++
++int LzmaDecode(
++    unsigned char *buffer,
++  #ifndef _LZMA_OUT_READ
++    UInt32 bufferSize,
++    int lc, int lp, int pb,
++  #ifdef _LZMA_IN_CB
++    ILzmaInCallback *inCallback,
++  #else
++    unsigned char *inStream, UInt32 inSize,
++  #endif
++  #endif
++    unsigned char *outStream, UInt32 outSize,
++    UInt32 *outSizeProcessed);
++
++#endif
+diff -urN linux-2.6.21.1.old/lib/LzmaDecode.c linux-2.6.21.1.dev/lib/LzmaDecode.c
+--- linux-2.6.21.1.old/lib/LzmaDecode.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/lib/LzmaDecode.c	2007-05-26 19:03:45.706682432 +0200
+@@ -0,0 +1,663 @@
++/*
++  LzmaDecode.c
++  LZMA Decoder
++
++  LZMA SDK 4.05 Copyright (c) 1999-2004 Igor Pavlov (2004-08-25)
++  http://www.7-zip.org/
++
++  LZMA SDK is licensed under two licenses:
++  1) GNU Lesser General Public License (GNU LGPL)
++  2) Common Public License (CPL)
++  It means that you can select one of these two licenses and
++  follow rules of that license.
++
++  SPECIAL EXCEPTION:
++  Igor Pavlov, as the author of this code, expressly permits you to
++  statically or dynamically link your code (or bind by name) to the
++  interfaces of this file without subjecting your linked code to the
++  terms of the CPL or GNU LGPL. Any modifications or additions
++  to this file, however, are subject to the LGPL or CPL terms.
++*/
++
++#include <linux/LzmaDecode.h>
++
++#ifndef Byte
++#define Byte unsigned char
++#endif
++
++#define kNumTopBits 24
++#define kTopValue ((UInt32)1 << kNumTopBits)
++
++#define kNumBitModelTotalBits 11
++#define kBitModelTotal (1 << kNumBitModelTotalBits)
++#define kNumMoveBits 5
++
++typedef struct _CRangeDecoder
++{
++  Byte *Buffer;
++  Byte *BufferLim;
++  UInt32 Range;
++  UInt32 Code;
++  #ifdef _LZMA_IN_CB
++  ILzmaInCallback *InCallback;
++  int Result;
++  #endif
++  int ExtraBytes;
++} CRangeDecoder;
++
++Byte RangeDecoderReadByte(CRangeDecoder *rd)
++{
++  if (rd->Buffer == rd->BufferLim)
++  {
++    #ifdef _LZMA_IN_CB
++    UInt32 size;
++    rd->Result = rd->InCallback->Read(rd->InCallback, &rd->Buffer, &size);
++    rd->BufferLim = rd->Buffer + size;
++    if (size == 0)
++    #endif
++    {
++      rd->ExtraBytes = 1;
++      return 0xFF;
++    }
++  }
++  return (*rd->Buffer++);
++}
++
++/* #define ReadByte (*rd->Buffer++) */
++#define ReadByte (RangeDecoderReadByte(rd))
++
++void RangeDecoderInit(CRangeDecoder *rd,
++  #ifdef _LZMA_IN_CB
++    ILzmaInCallback *inCallback
++  #else
++    Byte *stream, UInt32 bufferSize
++  #endif
++    )
++{
++  int i;
++  #ifdef _LZMA_IN_CB
++  rd->InCallback = inCallback;
++  rd->Buffer = rd->BufferLim = 0;
++  #else
++  rd->Buffer = stream;
++  rd->BufferLim = stream + bufferSize;
++  #endif
++  rd->ExtraBytes = 0;
++  rd->Code = 0;
++  rd->Range = (0xFFFFFFFF);
++  for(i = 0; i < 5; i++)
++    rd->Code = (rd->Code << 8) | ReadByte;
++}
++
++#define RC_INIT_VAR UInt32 range = rd->Range; UInt32 code = rd->Code;
++#define RC_FLUSH_VAR rd->Range = range; rd->Code = code;
++#define RC_NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | ReadByte; }
++
++UInt32 RangeDecoderDecodeDirectBits(CRangeDecoder *rd, int numTotalBits)
++{
++  RC_INIT_VAR
++  UInt32 result = 0;
++  int i;
++  for (i = numTotalBits; i > 0; i--)
++  {
++    /* UInt32 t; */
++    range >>= 1;
++
++    result <<= 1;
++    if (code >= range)
++    {
++      code -= range;
++      result |= 1;
++    }
++    /*
++    t = (code - range) >> 31;
++    t &= 1;
++    code -= range & (t - 1);
++    result = (result + result) | (1 - t);
++    */
++    RC_NORMALIZE
++  }
++  RC_FLUSH_VAR
++  return result;
++}
++
++int RangeDecoderBitDecode(CProb *prob, CRangeDecoder *rd)
++{
++  UInt32 bound = (rd->Range >> kNumBitModelTotalBits) * *prob;
++  if (rd->Code < bound)
++  {
++    rd->Range = bound;
++    *prob += (kBitModelTotal - *prob) >> kNumMoveBits;
++    if (rd->Range < kTopValue)
++    {
++      rd->Code = (rd->Code << 8) | ReadByte;
++      rd->Range <<= 8;
++    }
++    return 0;
++  }
++  else
++  {
++    rd->Range -= bound;
++    rd->Code -= bound;
++    *prob -= (*prob) >> kNumMoveBits;
++    if (rd->Range < kTopValue)
++    {
++      rd->Code = (rd->Code << 8) | ReadByte;
++      rd->Range <<= 8;
++    }
++    return 1;
++  }
++}
++
++#define RC_GET_BIT2(prob, mi, A0, A1) \
++  UInt32 bound = (range >> kNumBitModelTotalBits) * *prob; \
++  if (code < bound) \
++    { A0; range = bound; *prob += (kBitModelTotal - *prob) >> kNumMoveBits; mi <<= 1; } \
++  else \
++    { A1; range -= bound; code -= bound; *prob -= (*prob) >> kNumMoveBits; mi = (mi + mi) + 1; } \
++  RC_NORMALIZE
++
++#define RC_GET_BIT(prob, mi) RC_GET_BIT2(prob, mi, ; , ;)
++
++int RangeDecoderBitTreeDecode(CProb *probs, int numLevels, CRangeDecoder *rd)
++{
++  int mi = 1;
++  int i;
++  #ifdef _LZMA_LOC_OPT
++  RC_INIT_VAR
++  #endif
++  for(i = numLevels; i > 0; i--)
++  {
++    #ifdef _LZMA_LOC_OPT
++    CProb *prob = probs + mi;
++    RC_GET_BIT(prob, mi)
++    #else
++    mi = (mi + mi) + RangeDecoderBitDecode(probs + mi, rd);
++    #endif
++  }
++  #ifdef _LZMA_LOC_OPT
++  RC_FLUSH_VAR
++  #endif
++  return mi - (1 << numLevels);
++}
++
++int RangeDecoderReverseBitTreeDecode(CProb *probs, int numLevels, CRangeDecoder *rd)
++{
++  int mi = 1;
++  int i;
++  int symbol = 0;
++  #ifdef _LZMA_LOC_OPT
++  RC_INIT_VAR
++  #endif
++  for(i = 0; i < numLevels; i++)
++  {
++    #ifdef _LZMA_LOC_OPT
++    CProb *prob = probs + mi;
++    RC_GET_BIT2(prob, mi, ; , symbol |= (1 << i))
++    #else
++    int bit = RangeDecoderBitDecode(probs + mi, rd);
++    mi = mi + mi + bit;
++    symbol |= (bit << i);
++    #endif
++  }
++  #ifdef _LZMA_LOC_OPT
++  RC_FLUSH_VAR
++  #endif
++  return symbol;
++}
++
++Byte LzmaLiteralDecode(CProb *probs, CRangeDecoder *rd)
++{
++  int symbol = 1;
++  #ifdef _LZMA_LOC_OPT
++  RC_INIT_VAR
++  #endif
++  do
++  {
++    #ifdef _LZMA_LOC_OPT
++    CProb *prob = probs + symbol;
++    RC_GET_BIT(prob, symbol)
++    #else
++    symbol = (symbol + symbol) | RangeDecoderBitDecode(probs + symbol, rd);
++    #endif
++  }
++  while (symbol < 0x100);
++  #ifdef _LZMA_LOC_OPT
++  RC_FLUSH_VAR
++  #endif
++  return symbol;
++}
++
++Byte LzmaLiteralDecodeMatch(CProb *probs, CRangeDecoder *rd, Byte matchByte)
++{
++  int symbol = 1;
++  #ifdef _LZMA_LOC_OPT
++  RC_INIT_VAR
++  #endif
++  do
++  {
++    int bit;
++    int matchBit = (matchByte >> 7) & 1;
++    matchByte <<= 1;
++    #ifdef _LZMA_LOC_OPT
++    {
++      CProb *prob = probs + ((1 + matchBit) << 8) + symbol;
++      RC_GET_BIT2(prob, symbol, bit = 0, bit = 1)
++    }
++    #else
++    bit = RangeDecoderBitDecode(probs + ((1 + matchBit) << 8) + symbol, rd);
++    symbol = (symbol << 1) | bit;
++    #endif
++    if (matchBit != bit)
++    {
++      while (symbol < 0x100)
++      {
++        #ifdef _LZMA_LOC_OPT
++        CProb *prob = probs + symbol;
++        RC_GET_BIT(prob, symbol)
++        #else
++        symbol = (symbol + symbol) | RangeDecoderBitDecode(probs + symbol, rd);
++        #endif
++      }
++      break;
++    }
++  }
++  while (symbol < 0x100);
++  #ifdef _LZMA_LOC_OPT
++  RC_FLUSH_VAR
++  #endif
++  return symbol;
++}
++
++#define kNumPosBitsMax 4
++#define kNumPosStatesMax (1 << kNumPosBitsMax)
++
++#define kLenNumLowBits 3
++#define kLenNumLowSymbols (1 << kLenNumLowBits)
++#define kLenNumMidBits 3
++#define kLenNumMidSymbols (1 << kLenNumMidBits)
++#define kLenNumHighBits 8
++#define kLenNumHighSymbols (1 << kLenNumHighBits)
++
++#define LenChoice 0
++#define LenChoice2 (LenChoice + 1)
++#define LenLow (LenChoice2 + 1)
++#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
++#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
++#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
++
++int LzmaLenDecode(CProb *p, CRangeDecoder *rd, int posState)
++{
++  if(RangeDecoderBitDecode(p + LenChoice, rd) == 0)
++    return RangeDecoderBitTreeDecode(p + LenLow +
++        (posState << kLenNumLowBits), kLenNumLowBits, rd);
++  if(RangeDecoderBitDecode(p + LenChoice2, rd) == 0)
++    return kLenNumLowSymbols + RangeDecoderBitTreeDecode(p + LenMid +
++        (posState << kLenNumMidBits), kLenNumMidBits, rd);
++  return kLenNumLowSymbols + kLenNumMidSymbols +
++      RangeDecoderBitTreeDecode(p + LenHigh, kLenNumHighBits, rd);
++}
++
++#define kNumStates 12
++
++#define kStartPosModelIndex 4
++#define kEndPosModelIndex 14
++#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
++
++#define kNumPosSlotBits 6
++#define kNumLenToPosStates 4
++
++#define kNumAlignBits 4
++#define kAlignTableSize (1 << kNumAlignBits)
++
++#define kMatchMinLen 2
++
++#define IsMatch 0
++#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
++#define IsRepG0 (IsRep + kNumStates)
++#define IsRepG1 (IsRepG0 + kNumStates)
++#define IsRepG2 (IsRepG1 + kNumStates)
++#define IsRep0Long (IsRepG2 + kNumStates)
++#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
++#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
++#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
++#define LenCoder (Align + kAlignTableSize)
++#define RepLenCoder (LenCoder + kNumLenProbs)
++#define Literal (RepLenCoder + kNumLenProbs)
++
++#if Literal != LZMA_BASE_SIZE
++StopCompilingDueBUG
++#endif
++
++#ifdef _LZMA_OUT_READ
++
++typedef struct _LzmaVarState
++{
++  CRangeDecoder RangeDecoder;
++  Byte *Dictionary;
++  UInt32 DictionarySize;
++  UInt32 DictionaryPos;
++  UInt32 GlobalPos;
++  UInt32 Reps[4];
++  int lc;
++  int lp;
++  int pb;
++  int State;
++  int PreviousIsMatch;
++  int RemainLen;
++} LzmaVarState;
++
++int LzmaDecoderInit(
++    unsigned char *buffer, UInt32 bufferSize,
++    int lc, int lp, int pb,
++    unsigned char *dictionary, UInt32 dictionarySize,
++    #ifdef _LZMA_IN_CB
++    ILzmaInCallback *inCallback
++    #else
++    unsigned char *inStream, UInt32 inSize
++    #endif
++    )
++{
++  LzmaVarState *vs = (LzmaVarState *)buffer;
++  CProb *p = (CProb *)(buffer + sizeof(LzmaVarState));
++  UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + lp));
++  UInt32 i;
++  if (bufferSize < numProbs * sizeof(CProb) + sizeof(LzmaVarState))
++    return LZMA_RESULT_NOT_ENOUGH_MEM;
++  vs->Dictionary = dictionary;
++  vs->DictionarySize = dictionarySize;
++  vs->DictionaryPos = 0;
++  vs->GlobalPos = 0;
++  vs->Reps[0] = vs->Reps[1] = vs->Reps[2] = vs->Reps[3] = 1;
++  vs->lc = lc;
++  vs->lp = lp;
++  vs->pb = pb;
++  vs->State = 0;
++  vs->PreviousIsMatch = 0;
++  vs->RemainLen = 0;
++  dictionary[dictionarySize - 1] = 0;
++  for (i = 0; i < numProbs; i++)
++    p[i] = kBitModelTotal >> 1;
++  RangeDecoderInit(&vs->RangeDecoder,
++      #ifdef _LZMA_IN_CB
++      inCallback
++      #else
++      inStream, inSize
++      #endif
++  );
++  return LZMA_RESULT_OK;
++}
++
++int LzmaDecode(unsigned char *buffer,
++    unsigned char *outStream, UInt32 outSize,
++    UInt32 *outSizeProcessed)
++{
++  LzmaVarState *vs = (LzmaVarState *)buffer;
++  CProb *p = (CProb *)(buffer + sizeof(LzmaVarState));
++  CRangeDecoder rd = vs->RangeDecoder;
++  int state = vs->State;
++  int previousIsMatch = vs->PreviousIsMatch;
++  Byte previousByte;
++  UInt32 rep0 = vs->Reps[0], rep1 = vs->Reps[1], rep2 = vs->Reps[2], rep3 = vs->Reps[3];
++  UInt32 nowPos = 0;
++  UInt32 posStateMask = (1 << (vs->pb)) - 1;
++  UInt32 literalPosMask = (1 << (vs->lp)) - 1;
++  int lc = vs->lc;
++  int len = vs->RemainLen;
++  UInt32 globalPos = vs->GlobalPos;
++
++  Byte *dictionary = vs->Dictionary;
++  UInt32 dictionarySize = vs->DictionarySize;
++  UInt32 dictionaryPos = vs->DictionaryPos;
++
++  if (len == -1)
++  {
++    *outSizeProcessed = 0;
++    return LZMA_RESULT_OK;
++  }
++
++  while(len > 0 && nowPos < outSize)
++  {
++    UInt32 pos = dictionaryPos - rep0;
++    if (pos >= dictionarySize)
++      pos += dictionarySize;
++    outStream[nowPos++] = dictionary[dictionaryPos] = dictionary[pos];
++    if (++dictionaryPos == dictionarySize)
++      dictionaryPos = 0;
++    len--;
++  }
++  if (dictionaryPos == 0)
++    previousByte = dictionary[dictionarySize - 1];
++  else
++    previousByte = dictionary[dictionaryPos - 1];
++#else
++
++int LzmaDecode(
++    Byte *buffer, UInt32 bufferSize,
++    int lc, int lp, int pb,
++    #ifdef _LZMA_IN_CB
++    ILzmaInCallback *inCallback,
++    #else
++    unsigned char *inStream, UInt32 inSize,
++    #endif
++    unsigned char *outStream, UInt32 outSize,
++    UInt32 *outSizeProcessed)
++{
++  UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + lp));
++  CProb *p = (CProb *)buffer;
++  CRangeDecoder rd;
++  UInt32 i;
++  int state = 0;
++  int previousIsMatch = 0;
++  Byte previousByte = 0;
++  UInt32 rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1;
++  UInt32 nowPos = 0;
++  UInt32 posStateMask = (1 << pb) - 1;
++  UInt32 literalPosMask = (1 << lp) - 1;
++  int len = 0;
++  if (bufferSize < numProbs * sizeof(CProb))
++    return LZMA_RESULT_NOT_ENOUGH_MEM;
++  for (i = 0; i < numProbs; i++)
++    p[i] = kBitModelTotal >> 1;
++  RangeDecoderInit(&rd,
++      #ifdef _LZMA_IN_CB
++      inCallback
++      #else
++      inStream, inSize
++      #endif
++      );
++#endif
++
++  *outSizeProcessed = 0;
++  while(nowPos < outSize)
++  {
++    int posState = (int)(
++        (nowPos
++        #ifdef _LZMA_OUT_READ
++        + globalPos
++        #endif
++        )
++        & posStateMask);
++    #ifdef _LZMA_IN_CB
++    if (rd.Result != LZMA_RESULT_OK)
++      return rd.Result;
++    #endif
++    if (rd.ExtraBytes != 0)
++      return LZMA_RESULT_DATA_ERROR;
++    if (RangeDecoderBitDecode(p + IsMatch + (state << kNumPosBitsMax) + posState, &rd) == 0)
++    {
++      CProb *probs = p + Literal + (LZMA_LIT_SIZE *
++        (((
++        (nowPos
++        #ifdef _LZMA_OUT_READ
++        + globalPos
++        #endif
++        )
++        & literalPosMask) << lc) + (previousByte >> (8 - lc))));
++
++      if (state < 4) state = 0;
++      else if (state < 10) state -= 3;
++      else state -= 6;
++      if (previousIsMatch)
++      {
++        Byte matchByte;
++        #ifdef _LZMA_OUT_READ
++        UInt32 pos = dictionaryPos - rep0;
++        if (pos >= dictionarySize)
++          pos += dictionarySize;
++        matchByte = dictionary[pos];
++        #else
++        matchByte = outStream[nowPos - rep0];
++        #endif
++        previousByte = LzmaLiteralDecodeMatch(probs, &rd, matchByte);
++        previousIsMatch = 0;
++      }
++      else
++        previousByte = LzmaLiteralDecode(probs, &rd);
++      outStream[nowPos++] = previousByte;
++      #ifdef _LZMA_OUT_READ
++      dictionary[dictionaryPos] = previousByte;
++      if (++dictionaryPos == dictionarySize)
++        dictionaryPos = 0;
++      #endif
++    }
++    else
++    {
++      previousIsMatch = 1;
++      if (RangeDecoderBitDecode(p + IsRep + state, &rd) == 1)
++      {
++        if (RangeDecoderBitDecode(p + IsRepG0 + state, &rd) == 0)
++        {
++          if (RangeDecoderBitDecode(p + IsRep0Long + (state << kNumPosBitsMax) + posState, &rd) == 0)
++          {
++            #ifdef _LZMA_OUT_READ
++            UInt32 pos;
++            #endif
++            if (
++               (nowPos
++                #ifdef _LZMA_OUT_READ
++                + globalPos
++                #endif
++               )
++               == 0)
++              return LZMA_RESULT_DATA_ERROR;
++            state = state < 7 ? 9 : 11;
++            #ifdef _LZMA_OUT_READ
++            pos = dictionaryPos - rep0;
++            if (pos >= dictionarySize)
++              pos += dictionarySize;
++            previousByte = dictionary[pos];
++            dictionary[dictionaryPos] = previousByte;
++            if (++dictionaryPos == dictionarySize)
++              dictionaryPos = 0;
++            #else
++            previousByte = outStream[nowPos - rep0];
++            #endif
++            outStream[nowPos++] = previousByte;
++            continue;
++          }
++        }
++        else
++        {
++          UInt32 distance;
++          if(RangeDecoderBitDecode(p + IsRepG1 + state, &rd) == 0)
++            distance = rep1;
++          else
++          {
++            if(RangeDecoderBitDecode(p + IsRepG2 + state, &rd) == 0)
++              distance = rep2;
++            else
++            {
++              distance = rep3;
++              rep3 = rep2;
++            }
++            rep2 = rep1;
++          }
++          rep1 = rep0;
++          rep0 = distance;
++        }
++        len = LzmaLenDecode(p + RepLenCoder, &rd, posState);
++        state = state < 7 ? 8 : 11;
++      }
++      else
++      {
++        int posSlot;
++        rep3 = rep2;
++        rep2 = rep1;
++        rep1 = rep0;
++        state = state < 7 ? 7 : 10;
++        len = LzmaLenDecode(p + LenCoder, &rd, posState);
++        posSlot = RangeDecoderBitTreeDecode(p + PosSlot +
++            ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<
++            kNumPosSlotBits), kNumPosSlotBits, &rd);
++        if (posSlot >= kStartPosModelIndex)
++        {
++          int numDirectBits = ((posSlot >> 1) - 1);
++          rep0 = ((2 | ((UInt32)posSlot & 1)) << numDirectBits);
++          if (posSlot < kEndPosModelIndex)
++          {
++            rep0 += RangeDecoderReverseBitTreeDecode(
++                p + SpecPos + rep0 - posSlot - 1, numDirectBits, &rd);
++          }
++          else
++          {
++            rep0 += RangeDecoderDecodeDirectBits(&rd,
++                numDirectBits - kNumAlignBits) << kNumAlignBits;
++            rep0 += RangeDecoderReverseBitTreeDecode(p + Align, kNumAlignBits, &rd);
++          }
++        }
++        else
++          rep0 = posSlot;
++        rep0++;
++      }
++      if (rep0 == (UInt32)(0))
++      {
++        /* it's for stream version */
++        len = -1;
++        break;
++      }
++      if (rep0 > nowPos
++        #ifdef _LZMA_OUT_READ
++        + globalPos
++        #endif
++        )
++      {
++        return LZMA_RESULT_DATA_ERROR;
++      }
++      len += kMatchMinLen;
++      do
++      {
++        #ifdef _LZMA_OUT_READ
++        UInt32 pos = dictionaryPos - rep0;
++        if (pos >= dictionarySize)
++          pos += dictionarySize;
++        previousByte = dictionary[pos];
++        dictionary[dictionaryPos] = previousByte;
++        if (++dictionaryPos == dictionarySize)
++          dictionaryPos = 0;
++        #else
++        previousByte = outStream[nowPos - rep0];
++        #endif
++        outStream[nowPos++] = previousByte;
++        len--;
++      }
++      while(len > 0 && nowPos < outSize);
++    }
++  }
++
++  #ifdef _LZMA_OUT_READ
++  vs->RangeDecoder = rd;
++  vs->DictionaryPos = dictionaryPos;
++  vs->GlobalPos = globalPos + nowPos;
++  vs->Reps[0] = rep0;
++  vs->Reps[1] = rep1;
++  vs->Reps[2] = rep2;
++  vs->Reps[3] = rep3;
++  vs->State = state;
++  vs->PreviousIsMatch = previousIsMatch;
++  vs->RemainLen = len;
++  #endif
++
++  *outSizeProcessed = nowPos;
++  return LZMA_RESULT_OK;
++}
+diff -urN linux-2.6.21.1.old/lib/Makefile linux-2.6.21.1.dev/lib/Makefile
+--- linux-2.6.21.1.old/lib/Makefile	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/lib/Makefile	2007-05-26 19:03:45.721680152 +0200
+@@ -13,7 +13,7 @@
+ lib-y	+= kobject.o kref.o kobject_uevent.o klist.o
+ 
+ obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
+-	 bust_spinlocks.o hexdump.o
++	 bust_spinlocks.o hexdump.o LzmaDecode.o
+ 
+ ifeq ($(CONFIG_DEBUG_KOBJECT),y)
+ CFLAGS_kobject.o += -DDEBUG
+@@ -58,6 +58,7 @@
+ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+ 
+ obj-$(CONFIG_SWIOTLB) += swiotlb.o
++
+ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+ 
+ lib-$(CONFIG_GENERIC_BUG) += bug.o
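
The LzmaDecode.[ch] files above are dropped in unmodified, so with none of the optional defines set (_LZMA_IN_CB, _LZMA_OUT_READ, _LZMA_PROB32 are all commented out) the interface the rest of the tree sees is the single-call LzmaDecode() declared in the header. The caller sketch below is illustrative only: the function name and buffers are made up, but the signature, the lc/lp/pb defaults (the same 3/0/2 the squashfs patch that follows uses) and the workspace formula come straight from LzmaDecode.h.

	#include <stdio.h>
	#include <stdlib.h>
	#include "LzmaDecode.h"	/* the header added above as include/linux/LzmaDecode.h */

	/* Decompress one raw LZMA stream whose uncompressed size is already known,
	 * as it is for squashfs blocks. */
	int lzma_unpack(unsigned char *in, UInt32 in_len,
			unsigned char *out, UInt32 out_len)
	{
		const int lc = 3, lp = 0, pb = 2;
		/* bufferSize = (LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp))) * sizeof(CProb)
		 * = (1846 + 768 * 8) * 2 = 15980 bytes with the default 16-bit CProb */
		UInt32 ws_size = (LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp))) * sizeof(CProb);
		unsigned char *ws = malloc(ws_size);
		UInt32 produced = 0;
		int rc;

		if (!ws)
			return LZMA_RESULT_NOT_ENOUGH_MEM;
		rc = LzmaDecode(ws, ws_size, lc, lp, pb,
				in, in_len, out, out_len, &produced);
		free(ws);
		if (rc != LZMA_RESULT_OK)
			fprintf(stderr, "lzma returned unexpected result 0x%x\n", rc);
		return rc;
	}
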
diff --git a/target/linux/generic-2.6/patches-2.6.22/003-squashfs_lzma.patch b/target/linux/generic-2.6/patches-2.6.22/003-squashfs_lzma.patch
new file mode 100644
index 0000000000..1561461d9e
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/003-squashfs_lzma.patch
@@ -0,0 +1,108 @@
+diff -urN linux-2.6.21.1.old/fs/squashfs/inode.c linux-2.6.21.1.dev/fs/squashfs/inode.c
+--- linux-2.6.21.1.old/fs/squashfs/inode.c	2007-05-26 19:03:45.499713896 +0200
++++ linux-2.6.21.1.dev/fs/squashfs/inode.c	2007-05-26 19:07:27.951896024 +0200
+@@ -4,6 +4,9 @@
+  * Copyright (c) 2002, 2003, 2004, 2005, 2006
+  * Phillip Lougher <phillip@lougher.org.uk>
+  *
++ * LZMA decompressor support added by Oleg I. Vdovikin
++ * Copyright (c) 2005 Oleg I.Vdovikin <oleg@cs.msu.su>
++ *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License
+  * as published by the Free Software Foundation; either version 2,
+@@ -21,6 +24,7 @@
+  * inode.c
+  */
+ 
++#define SQUASHFS_LZMA
+ #include <linux/types.h>
+ #include <linux/squashfs_fs.h>
+ #include <linux/module.h>
+@@ -44,6 +48,19 @@
+ 
+ #include "squashfs.h"
+ 
++#ifdef SQUASHFS_LZMA
++#include <linux/LzmaDecode.h>
++
++/* default LZMA settings, should be in sync with mksquashfs */
++#define LZMA_LC 3
++#define LZMA_LP 0
++#define LZMA_PB 2
++
++#define LZMA_WORKSPACE_SIZE ((LZMA_BASE_SIZE + \
++      (LZMA_LIT_SIZE << (LZMA_LC + LZMA_LP))) * sizeof(CProb))
++
++#endif
++
+ static void squashfs_put_super(struct super_block *);
+ static int squashfs_statfs(struct dentry *, struct kstatfs *);
+ static int squashfs_symlink_readpage(struct file *file, struct page *page);
+@@ -64,7 +81,11 @@
+ 			const char *, void *, struct vfsmount *);
+ 
+ 
++#ifdef SQUASHFS_LZMA
++static unsigned char lzma_workspace[LZMA_WORKSPACE_SIZE];
++#else
+ static z_stream stream;
++#endif
+ 
+ static struct file_system_type squashfs_fs_type = {
+ 	.owner = THIS_MODULE,
+@@ -249,6 +270,15 @@
+ 	if (compressed) {
+ 		int zlib_err;
+ 
++#ifdef SQUASHFS_LZMA
++		if ((zlib_err = LzmaDecode(lzma_workspace,
++			LZMA_WORKSPACE_SIZE, LZMA_LC, LZMA_LP, LZMA_PB,
++			c_buffer, c_byte, buffer, msblk->read_size, &bytes)) != LZMA_RESULT_OK)
++		{
++			ERROR("lzma returned unexpected result 0x%x\n", zlib_err);
++			bytes = 0;
++		}
++#else
+ 		stream.next_in = c_buffer;
+ 		stream.avail_in = c_byte;
+ 		stream.next_out = buffer;
+@@ -263,7 +293,7 @@
+ 			bytes = 0;
+ 		} else
+ 			bytes = stream.total_out;
+-
++#endif
+ 		up(&msblk->read_data_mutex);
+ 	}
+ 
+@@ -2045,15 +2075,19 @@
+ 	printk(KERN_INFO "squashfs: version 3.0 (2006/03/15) "
+ 		"Phillip Lougher\n");
+ 
++#ifndef SQUASHFS_LZMA
+ 	if (!(stream.workspace = vmalloc(zlib_inflate_workspacesize()))) {
+ 		ERROR("Failed to allocate zlib workspace\n");
+ 		destroy_inodecache();
+ 		err = -ENOMEM;
+ 		goto out;
+ 	}
++#endif
+ 
+ 	if ((err = register_filesystem(&squashfs_fs_type))) {
++#ifndef SQUASHFS_LZMA
+ 		vfree(stream.workspace);
++#endif
+ 		destroy_inodecache();
+ 	}
+ 
+@@ -2064,7 +2098,9 @@
+ 
+ static void __exit exit_squashfs_fs(void)
+ {
++#ifndef SQUASHFS_LZMA
+ 	vfree(stream.workspace);
++#endif
+ 	unregister_filesystem(&squashfs_fs_type);
+ 	destroy_inodecache();
+ }
diff --git a/target/linux/generic-2.6/patches-2.6.22/004-extra_optimization.patch b/target/linux/generic-2.6/patches-2.6.22/004-extra_optimization.patch
new file mode 100644
index 0000000000..57bea7ea50
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/004-extra_optimization.patch
@@ -0,0 +1,13 @@
+diff -urN linux-2.6.21.1.old/Makefile linux-2.6.21.1.dev/Makefile
+--- linux-2.6.21.1.old/Makefile	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/Makefile	2007-05-26 19:14:22.967804016 +0200
+@@ -507,6 +507,9 @@
+ NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
+ CHECKFLAGS     += $(NOSTDINC_FLAGS)
+ 
++# improve gcc optimization
++CFLAGS += $(call cc-option,-funit-at-a-time,)
++
+ # warn about C99 declaration after statement
+ CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
+ 
diff --git a/target/linux/generic-2.6/patches-2.6.22/006-gcc4_inline_fix.patch b/target/linux/generic-2.6/patches-2.6.22/006-gcc4_inline_fix.patch
new file mode 100644
index 0000000000..49c85a71ee
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/006-gcc4_inline_fix.patch
@@ -0,0 +1,12 @@
+diff -urN linux-2.6.21.1.old/include/asm-mips/system.h linux-2.6.21.1.dev/include/asm-mips/system.h
+--- linux-2.6.21.1.old/include/asm-mips/system.h	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/include/asm-mips/system.h	2007-05-26 19:26:30.870146040 +0200
+@@ -188,7 +188,7 @@
+    if something tries to do an invalid xchg().  */
+ extern void __xchg_called_with_bad_pointer(void);
+ 
+-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
++static __always_inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+ {
+ 	switch (size) {
+ 	case 4:
diff --git a/target/linux/generic-2.6/patches-2.6.22/007-samsung_flash.patch b/target/linux/generic-2.6/patches-2.6.22/007-samsung_flash.patch
new file mode 100644
index 0000000000..8c94003081
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/007-samsung_flash.patch
@@ -0,0 +1,37 @@
+diff -urN linux-2.6.21.1.old/drivers/mtd/chips/cfi_cmdset_0002.c linux-2.6.21.1.dev/drivers/mtd/chips/cfi_cmdset_0002.c
+--- linux-2.6.21.1.old/drivers/mtd/chips/cfi_cmdset_0002.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/drivers/mtd/chips/cfi_cmdset_0002.c	2007-05-26 19:30:01.049193968 +0200
+@@ -51,6 +51,7 @@
+ #define SST49LF040B	        0x0050
+ #define SST49LF008A		0x005a
+ #define AT49BV6416		0x00d6
++#define MANUFACTURER_SAMSUNG	0x00ec
+ 
+ static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+ static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+@@ -294,12 +295,19 @@
+ 
+ 		if (extp->MajorVersion != '1' ||
+ 		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+-			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
+-			       "version %c.%c.\n",  extp->MajorVersion,
+-			       extp->MinorVersion);
+-			kfree(extp);
+-			kfree(mtd);
+-			return NULL;
++		        if (cfi->mfr == MANUFACTURER_SAMSUNG &&
++			    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
++			    printk(KERN_NOTICE "  Newer Samsung flash detected, "
++			           "should be compatible with Amd/Fujitsu.\n");
++		        }
++		        else {
++			    printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
++			           "version %c.%c.\n",  extp->MajorVersion,
++			           extp->MinorVersion);
++			    kfree(extp);
++			    kfree(mtd);
++			    return NULL;
++		        }
+ 		}
+ 
+ 		/* Install our own private info structure */
diff --git a/target/linux/generic-2.6/patches-2.6.22/009-revert_intel_flash_breakage.patch b/target/linux/generic-2.6/patches-2.6.22/009-revert_intel_flash_breakage.patch
new file mode 100644
index 0000000000..b188efe7b4
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/009-revert_intel_flash_breakage.patch
@@ -0,0 +1,170 @@
+diff -urN linux-2.6.21.1.old/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.21.1.dev/drivers/mtd/chips/cfi_cmdset_0001.c
+--- linux-2.6.21.1.old/drivers/mtd/chips/cfi_cmdset_0001.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/drivers/mtd/chips/cfi_cmdset_0001.c	2007-05-26 19:40:46.809023552 +0200
+@@ -933,7 +933,7 @@
+ 
+ static int __xipram xip_wait_for_operation(
+ 		struct map_info *map, struct flchip *chip,
+-		unsigned long adr, unsigned int chip_op_time )
++		unsigned long adr, int *chip_op_time )
+ {
+ 	struct cfi_private *cfi = map->fldrv_priv;
+ 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+@@ -942,7 +942,7 @@
+ 	flstate_t oldstate, newstate;
+ 
+        	start = xip_currtime();
+-	usec = chip_op_time * 8;
++	usec = *chip_op_time * 8;
+ 	if (usec == 0)
+ 		usec = 500000;
+ 	done = 0;
+@@ -1052,8 +1052,8 @@
+ #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
+ 	INVALIDATE_CACHED_RANGE(map, from, size)
+ 
+-#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
+-	xip_wait_for_operation(map, chip, cmd_adr, usec)
++#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
++	xip_wait_for_operation(map, chip, cmd_adr, p_usec)
+ 
+ #else
+ 
+@@ -1065,65 +1065,65 @@
+ static int inval_cache_and_wait_for_operation(
+ 		struct map_info *map, struct flchip *chip,
+ 		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
+-		unsigned int chip_op_time)
++		int *chip_op_time )
+ {
+ 	struct cfi_private *cfi = map->fldrv_priv;
+ 	map_word status, status_OK = CMD(0x80);
+-	int chip_state = chip->state;
+-	unsigned int timeo, sleep_time;
++	int z, chip_state = chip->state;
++	unsigned long timeo;
+ 
+ 	spin_unlock(chip->mutex);
+ 	if (inval_len)
+ 		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
++	if (*chip_op_time)
++		cfi_udelay(*chip_op_time);
+ 	spin_lock(chip->mutex);
+ 
+-	/* set our timeout to 8 times the expected delay */
+-	timeo = chip_op_time * 8;
+-	if (!timeo)
+-		timeo = 500000;
+-	sleep_time = chip_op_time / 2;
++	timeo = *chip_op_time * 8 * HZ / 1000000;
++	if (timeo < HZ/2)
++		timeo = HZ/2;
++	timeo += jiffies;
+ 
++	z = 0;
+ 	for (;;) {
++		if (chip->state != chip_state) {
++			/* Someone's suspended the operation: sleep */
++			DECLARE_WAITQUEUE(wait, current);
++
++			set_current_state(TASK_UNINTERRUPTIBLE);
++			add_wait_queue(&chip->wq, &wait);
++			spin_unlock(chip->mutex);
++			schedule();
++			remove_wait_queue(&chip->wq, &wait);
++			timeo = jiffies + (HZ / 2); /* FIXME */
++			spin_lock(chip->mutex);
++			continue;
++		}
++
+ 		status = map_read(map, cmd_adr);
+ 		if (map_word_andequal(map, status, status_OK, status_OK))
+ 			break;
+ 
+-		if (!timeo) {
++		/* OK Still waiting */
++		if (time_after(jiffies, timeo)) {
+ 			map_write(map, CMD(0x70), cmd_adr);
+ 			chip->state = FL_STATUS;
+ 			return -ETIME;
+ 		}
+ 
+-		/* OK Still waiting. Drop the lock, wait a while and retry. */
++		/* Latency issues. Drop the lock, wait a while and retry */
++		z++;
+ 		spin_unlock(chip->mutex);
+-		if (sleep_time >= 1000000/HZ) {
+-			/*
+-			 * Half of the normal delay still remaining
+-			 * can be performed with a sleeping delay instead
+-			 * of busy waiting.
+-			 */
+-			msleep(sleep_time/1000);
+-			timeo -= sleep_time;
+-			sleep_time = 1000000/HZ;
+-		} else {
+-			udelay(1);
+-			cond_resched();
+-			timeo--;
+-		}
++		cfi_udelay(1);
+ 		spin_lock(chip->mutex);
+-
+-		while (chip->state != chip_state) {
+-			/* Someone's suspended the operation: sleep */
+-			DECLARE_WAITQUEUE(wait, current);
+-			set_current_state(TASK_UNINTERRUPTIBLE);
+-			add_wait_queue(&chip->wq, &wait);
+-			spin_unlock(chip->mutex);
+-			schedule();
+-			remove_wait_queue(&chip->wq, &wait);
+-			spin_lock(chip->mutex);
+-		}
+ 	}
+ 
++	if (!z) {
++		if (!--(*chip_op_time))
++			*chip_op_time = 1;
++	} else if (z > 1)
++		++(*chip_op_time);
++
+ 	/* Done and happy. */
+  	chip->state = FL_STATUS;
+ 	return 0;
+@@ -1132,7 +1132,8 @@
+ #endif
+ 
+ #define WAIT_TIMEOUT(map, chip, adr, udelay) \
+-	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
++	({ int __udelay = (udelay); \
++	   INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
+ 
+ 
+ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
+@@ -1356,7 +1357,7 @@
+ 
+ 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+ 				   adr, map_bankwidth(map),
+-				   chip->word_write_time);
++				   &chip->word_write_time);
+ 	if (ret) {
+ 		xip_enable(map, chip, adr);
+ 		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
+@@ -1593,7 +1594,7 @@
+ 
+ 	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
+ 				   adr, len,
+-				   chip->buffer_write_time);
++				   &chip->buffer_write_time);
+ 	if (ret) {
+ 		map_write(map, CMD(0x70), cmd_adr);
+ 		chip->state = FL_STATUS;
+@@ -1728,7 +1729,7 @@
+ 
+ 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+ 				   adr, len,
+-				   chip->erase_time);
++				   &chip->erase_time);
+ 	if (ret) {
+ 		map_write(map, CMD(0x70), adr);
+ 		chip->state = FL_STATUS;
diff --git a/target/linux/generic-2.6/patches-2.6.22/010-disable_old_squashfs_compatibility.patch b/target/linux/generic-2.6/patches-2.6.22/010-disable_old_squashfs_compatibility.patch
new file mode 100644
index 0000000000..de205a98ab
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/010-disable_old_squashfs_compatibility.patch
@@ -0,0 +1,21 @@
+diff -urN linux-2.6.21.1.old/fs/squashfs/Makefile linux-2.6.21.1.dev/fs/squashfs/Makefile
+--- linux-2.6.21.1.old/fs/squashfs/Makefile	2007-05-26 19:03:45.499713896 +0200
++++ linux-2.6.21.1.dev/fs/squashfs/Makefile	2007-05-26 19:43:37.064140840 +0200
+@@ -4,4 +4,3 @@
+ 
+ obj-$(CONFIG_SQUASHFS) += squashfs.o
+ squashfs-y += inode.o
+-squashfs-y += squashfs2_0.o
+diff -urN linux-2.6.21.1.old/fs/squashfs/squashfs.h linux-2.6.21.1.dev/fs/squashfs/squashfs.h
+--- linux-2.6.21.1.old/fs/squashfs/squashfs.h	2007-05-26 19:03:45.500713744 +0200
++++ linux-2.6.21.1.dev/fs/squashfs/squashfs.h	2007-05-26 19:43:37.075139168 +0200
+@@ -24,6 +24,9 @@
+ #ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
+ #undef CONFIG_SQUASHFS_1_0_COMPATIBILITY
+ #endif
++#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
++#undef CONFIG_SQUASHFS_2_0_COMPATIBILITY
++#endif
+ 
+ #ifdef SQUASHFS_TRACE
+ #define TRACE(s, args...)	printk(KERN_NOTICE "SQUASHFS: "s, ## args)
diff --git a/target/linux/generic-2.6/patches-2.6.22/011-mips_boot.patch b/target/linux/generic-2.6/patches-2.6.22/011-mips_boot.patch
new file mode 100644
index 0000000000..6b3ebf74d9
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/011-mips_boot.patch
@@ -0,0 +1,20 @@
+diff -urN linux-2.6.21.1.old/arch/mips/kernel/head.S linux-2.6.21.1.dev/arch/mips/kernel/head.S
+--- linux-2.6.21.1.old/arch/mips/kernel/head.S	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/arch/mips/kernel/head.S	2007-05-26 19:46:49.061952736 +0200
+@@ -129,11 +129,15 @@
+ #endif
+ 	.endm
+ 
++
++	j kernel_entry
++	nop
++
+ 	/*
+ 	 * Reserved space for exception handlers.
+ 	 * Necessary for machines which link their kernels at KSEG0.
+ 	 */
+-	.fill	0x400
++	.align 10
+ 
+ EXPORT(stext)					# used for profiling
+ EXPORT(_stext)
diff --git a/target/linux/generic-2.6/patches-2.6.22/012-mips_cpu_tlb.patch b/target/linux/generic-2.6/patches-2.6.22/012-mips_cpu_tlb.patch
new file mode 100644
index 0000000000..85b31e3348
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/012-mips_cpu_tlb.patch
@@ -0,0 +1,19 @@
+diff -urN linux-2.6.21.1.old/arch/mips/mm/tlbex.c linux-2.6.21.1.dev/arch/mips/mm/tlbex.c
+--- linux-2.6.21.1.old/arch/mips/mm/tlbex.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/arch/mips/mm/tlbex.c	2007-05-26 19:50:19.046030304 +0200
+@@ -887,7 +887,6 @@
+ 	case CPU_R10000:
+ 	case CPU_R12000:
+ 	case CPU_R14000:
+-	case CPU_4KC:
+ 	case CPU_SB1:
+ 	case CPU_SB1A:
+ 	case CPU_4KSC:
+@@ -915,6 +914,7 @@
+ 		tlbw(p);
+ 		break;
+ 
++	case CPU_4KC:
+ 	case CPU_4KEC:
+ 	case CPU_24K:
+ 	case CPU_34K:
diff --git a/target/linux/generic-2.6/patches-2.6.22/013-mips_generic_gpio_support.patch b/target/linux/generic-2.6/patches-2.6.22/013-mips_generic_gpio_support.patch
new file mode 100644
index 0000000000..69124bc242
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/013-mips_generic_gpio_support.patch
@@ -0,0 +1,35 @@
+diff -urN linux-2.6.21.1.old/arch/mips/defconfig linux-2.6.21.1.dev/arch/mips/defconfig
+--- linux-2.6.21.1.old/arch/mips/defconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/arch/mips/defconfig	2007-05-26 19:55:17.542651920 +0200
+@@ -69,6 +69,7 @@
+ CONFIG_GENERIC_HWEIGHT=y
+ CONFIG_GENERIC_CALIBRATE_DELAY=y
+ CONFIG_GENERIC_TIME=y
++CONFIG_GENERIC_GPIO=n
+ CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+ # CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
+ CONFIG_ARC=y
+diff -urN linux-2.6.21.1.old/arch/mips/Kconfig linux-2.6.21.1.dev/arch/mips/Kconfig
+--- linux-2.6.21.1.old/arch/mips/Kconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/arch/mips/Kconfig	2007-05-26 19:55:17.500658304 +0200
+@@ -704,6 +704,10 @@
+ 	bool
+ 	default y
+ 
++config GENERIC_GPIO
++	bool
++	default n
++
+ config SCHED_NO_NO_OMIT_FRAME_POINTER
+ 	bool
+ 	default y
+diff -urN linux-2.6.21.1.old/include/asm-mips/gpio.h linux-2.6.21.1.dev/include/asm-mips/gpio.h
+--- linux-2.6.21.1.old/include/asm-mips/gpio.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/asm-mips/gpio.h	2007-05-26 19:55:17.500658304 +0200
+@@ -0,0 +1,6 @@
++#ifndef _ASM_MIPS_GPIO_H
++#define _ASM_MIPS_GPIO_H
++
++#include <gpio.h>
++
++#endif /* _ASM_MIPS_GPIO_H */
diff --git a/target/linux/generic-2.6/patches-2.6.22/060-rootfs_split.patch b/target/linux/generic-2.6/patches-2.6.22/060-rootfs_split.patch
new file mode 100644
index 0000000000..4610d3c01a
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/060-rootfs_split.patch
@@ -0,0 +1,410 @@
+diff -urN linux-2.6.21.1.old/drivers/mtd/Kconfig linux-2.6.21.1.dev/drivers/mtd/Kconfig
+--- linux-2.6.21.1.old/drivers/mtd/Kconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/drivers/mtd/Kconfig	2007-05-26 19:58:42.320520952 +0200
+@@ -47,6 +47,11 @@
+ 	  devices. Partitioning on NFTL 'devices' is a different - that's the
+ 	  'normal' form of partitioning used on a block device.
+ 
++config MTD_SPLIT_ROOTFS
++	bool "Automatically split rootfs partition for squashfs"
++	depends on MTD_PARTITIONS
++	default y
++
+ config MTD_REDBOOT_PARTS
+ 	tristate "RedBoot partition table parsing"
+ 	depends on MTD_PARTITIONS
+diff -urN linux-2.6.21.1.old/drivers/mtd/mtdpart.c linux-2.6.21.1.dev/drivers/mtd/mtdpart.c
+--- linux-2.6.21.1.old/drivers/mtd/mtdpart.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/drivers/mtd/mtdpart.c	2007-05-26 19:58:42.331519280 +0200
+@@ -20,6 +20,8 @@
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
+ #include <linux/mtd/compatmac.h>
++#include <linux/squashfs_fs.h>
++#include <linux/root_dev.h>
+ 
+ /* Our partition linked list */
+ static LIST_HEAD(mtd_partitions);
+@@ -308,6 +310,171 @@
+ 	return 0;
+ }
+ 
++static u_int32_t cur_offset = 0;
++static int add_mtd_partition(struct mtd_info *master, const struct mtd_partition *part, int i)
++{
++	struct mtd_part *slave;
++	
++	/* allocate the partition structure */
++	slave = kzalloc (sizeof(*slave), GFP_KERNEL);
++	if (!slave) {
++		printk ("memory allocation error while creating partitions for \"%s\"\n",
++			master->name);
++		del_mtd_partitions(master);
++		return -ENOMEM;
++	}
++	list_add(&slave->list, &mtd_partitions);
++
++	/* set up the MTD object for this partition */
++	slave->mtd.type = master->type;
++	slave->mtd.flags = master->flags & ~part->mask_flags;
++	slave->mtd.size = part->size;
++	slave->mtd.writesize = master->writesize;
++	slave->mtd.oobsize = master->oobsize;
++	slave->mtd.oobavail = master->oobavail;
++	slave->mtd.subpage_sft = master->subpage_sft;
++
++	slave->mtd.name = part->name;
++	slave->mtd.owner = master->owner;
++
++	slave->mtd.read = part_read;
++	slave->mtd.write = part_write;
++
++	if(master->point && master->unpoint){
++		slave->mtd.point = part_point;
++		slave->mtd.unpoint = part_unpoint;
++	}
++
++	if (master->read_oob)
++		slave->mtd.read_oob = part_read_oob;
++	if (master->write_oob)
++		slave->mtd.write_oob = part_write_oob;
++	if(master->read_user_prot_reg)
++		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
++	if(master->read_fact_prot_reg)
++		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
++	if(master->write_user_prot_reg)
++		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
++	if(master->lock_user_prot_reg)
++		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
++	if(master->get_user_prot_info)
++		slave->mtd.get_user_prot_info = part_get_user_prot_info;
++	if(master->get_fact_prot_info)
++		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
++	if (master->sync)
++		slave->mtd.sync = part_sync;
++	if (!i && master->suspend && master->resume) {
++			slave->mtd.suspend = part_suspend;
++			slave->mtd.resume = part_resume;
++	}
++	if (master->writev)
++		slave->mtd.writev = part_writev;
++	if (master->lock)
++		slave->mtd.lock = part_lock;
++	if (master->unlock)
++		slave->mtd.unlock = part_unlock;
++	if (master->block_isbad)
++		slave->mtd.block_isbad = part_block_isbad;
++	if (master->block_markbad)
++		slave->mtd.block_markbad = part_block_markbad;
++	slave->mtd.erase = part_erase;
++	slave->master = master;
++	slave->offset = part->offset;
++	slave->index = i;
++
++	if (slave->offset == MTDPART_OFS_APPEND)
++		slave->offset = cur_offset;
++	if (slave->offset == MTDPART_OFS_NXTBLK) {
++		slave->offset = cur_offset;
++		if ((cur_offset % master->erasesize) != 0) {
++			/* Round up to next erasesize */
++			slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
++			printk(KERN_NOTICE "Moving partition %d: "
++			       "0x%08x -> 0x%08x\n", i,
++			       cur_offset, slave->offset);
++		}
++	}
++	if (slave->mtd.size == MTDPART_SIZ_FULL)
++		slave->mtd.size = master->size - slave->offset;
++	cur_offset = slave->offset + slave->mtd.size;
++
++	printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
++		slave->offset + slave->mtd.size, slave->mtd.name);
++
++	/* let's do some sanity checks */
++	if (slave->offset >= master->size) {
++			/* let's register it anyway to preserve ordering */
++		slave->offset = 0;
++		slave->mtd.size = 0;
++		printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
++			part->name);
++	}
++	if (slave->offset + slave->mtd.size > master->size) {
++		slave->mtd.size = master->size - slave->offset;
++		printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
++			part->name, master->name, slave->mtd.size);
++	}
++	if (master->numeraseregions>1) {
++		/* Deal with variable erase size stuff */
++		int i;
++		struct mtd_erase_region_info *regions = master->eraseregions;
++
++		/* Find the first erase regions which is part of this partition. */
++		for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
++			;
++
++		for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
++			if (slave->mtd.erasesize < regions[i].erasesize) {
++				slave->mtd.erasesize = regions[i].erasesize;
++			}
++		}
++	} else {
++		/* Single erase size */
++		slave->mtd.erasesize = master->erasesize;
++	}
++
++	if ((slave->mtd.flags & MTD_WRITEABLE) &&
++	    (slave->offset % slave->mtd.erasesize)) {
++		/* Doesn't start on a boundary of major erase size */
++		/* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
++		slave->mtd.flags &= ~MTD_WRITEABLE;
++		printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
++			part->name);
++	}
++	if ((slave->mtd.flags & MTD_WRITEABLE) &&
++	    (slave->mtd.size % slave->mtd.erasesize)) {
++		slave->mtd.flags &= ~MTD_WRITEABLE;
++		printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
++			part->name);
++	}
++
++	slave->mtd.ecclayout = master->ecclayout;
++	if (master->block_isbad) {
++		uint32_t offs = 0;
++
++		while(offs < slave->mtd.size) {
++			if (master->block_isbad(master,
++						offs + slave->offset))
++				slave->mtd.ecc_stats.badblocks++;
++			offs += slave->mtd.erasesize;
++		}
++	}
++
++	if(part->mtdp)
++	{	/* store the object pointer (caller may or may not register it) */
++		*part->mtdp = &slave->mtd;
++		slave->registered = 0;
++	}
++	else
++	{
++		/* register our partition */
++		add_mtd_device(&slave->mtd);
++		slave->registered = 1;
++	}
++
++	return 0;
++}
++
+ /*
+  * This function, given a master MTD object and a partition table, creates
+  * and registers slave MTD objects which are bound to the master according to
+@@ -319,169 +487,53 @@
+ 		       const struct mtd_partition *parts,
+ 		       int nbparts)
+ {
+-	struct mtd_part *slave;
+-	u_int32_t cur_offset = 0;
+-	int i;
++	struct mtd_partition *part;
++	int i, ret = 0;
+ 
+ 	printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+ 
+ 	for (i = 0; i < nbparts; i++) {
++		part = (struct mtd_partition *) &parts[i];
++		ret = add_mtd_partition(master, part, i);
++		if (ret)
++			return ret;
++		if (strcmp(part->name, "rootfs") == 0) {
++#ifdef CONFIG_MTD_SPLIT_ROOTFS
++			int len;
++			char buf[512];
++			struct squashfs_super_block *sb = (struct squashfs_super_block *) buf;
++#define ROOTFS_SPLIT_NAME "rootfs_data"
++			if ((master->read(master, part->offset, sizeof(struct squashfs_super_block), &len, buf) == 0) &&
++					(len == sizeof(struct squashfs_super_block)) &&
++					(*((u32 *) buf) == SQUASHFS_MAGIC) &&
++					(sb->bytes_used > 0)) {
++
++				
++				part = kmalloc(sizeof(struct mtd_partition), GFP_KERNEL);
++				memcpy(part, &parts[i], sizeof(struct mtd_partition));
++				
++				part->name = kmalloc(sizeof(ROOTFS_SPLIT_NAME) + 1, GFP_KERNEL);
++				strcpy(part->name, ROOTFS_SPLIT_NAME);
++
++				len = (u32) sb->bytes_used;
++				len += (part->offset & 0x000fffff);
++				len +=  (master->erasesize - 1);
++				len &= ~(master->erasesize - 1);
++				len -= (part->offset & 0x000fffff);
++				part->offset += len;
++				part->size -= len;
++			
++				if (master->erasesize <= part->size)
++					ret = add_mtd_partition(master, part, i + 1);
++				else
++					kfree(part->name);
++				if (ret)
++					return ret;
+ 
+-		/* allocate the partition structure */
+-		slave = kzalloc (sizeof(*slave), GFP_KERNEL);
+-		if (!slave) {
+-			printk ("memory allocation error while creating partitions for \"%s\"\n",
+-				master->name);
+-			del_mtd_partitions(master);
+-			return -ENOMEM;
+-		}
+-		list_add(&slave->list, &mtd_partitions);
+-
+-		/* set up the MTD object for this partition */
+-		slave->mtd.type = master->type;
+-		slave->mtd.flags = master->flags & ~parts[i].mask_flags;
+-		slave->mtd.size = parts[i].size;
+-		slave->mtd.writesize = master->writesize;
+-		slave->mtd.oobsize = master->oobsize;
+-		slave->mtd.oobavail = master->oobavail;
+-		slave->mtd.subpage_sft = master->subpage_sft;
+-
+-		slave->mtd.name = parts[i].name;
+-		slave->mtd.owner = master->owner;
+-
+-		slave->mtd.read = part_read;
+-		slave->mtd.write = part_write;
+-
+-		if(master->point && master->unpoint){
+-			slave->mtd.point = part_point;
+-			slave->mtd.unpoint = part_unpoint;
+-		}
+-
+-		if (master->read_oob)
+-			slave->mtd.read_oob = part_read_oob;
+-		if (master->write_oob)
+-			slave->mtd.write_oob = part_write_oob;
+-		if(master->read_user_prot_reg)
+-			slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
+-		if(master->read_fact_prot_reg)
+-			slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
+-		if(master->write_user_prot_reg)
+-			slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
+-		if(master->lock_user_prot_reg)
+-			slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
+-		if(master->get_user_prot_info)
+-			slave->mtd.get_user_prot_info = part_get_user_prot_info;
+-		if(master->get_fact_prot_info)
+-			slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
+-		if (master->sync)
+-			slave->mtd.sync = part_sync;
+-		if (!i && master->suspend && master->resume) {
+-				slave->mtd.suspend = part_suspend;
+-				slave->mtd.resume = part_resume;
+-		}
+-		if (master->writev)
+-			slave->mtd.writev = part_writev;
+-		if (master->lock)
+-			slave->mtd.lock = part_lock;
+-		if (master->unlock)
+-			slave->mtd.unlock = part_unlock;
+-		if (master->block_isbad)
+-			slave->mtd.block_isbad = part_block_isbad;
+-		if (master->block_markbad)
+-			slave->mtd.block_markbad = part_block_markbad;
+-		slave->mtd.erase = part_erase;
+-		slave->master = master;
+-		slave->offset = parts[i].offset;
+-		slave->index = i;
+-
+-		if (slave->offset == MTDPART_OFS_APPEND)
+-			slave->offset = cur_offset;
+-		if (slave->offset == MTDPART_OFS_NXTBLK) {
+-			slave->offset = cur_offset;
+-			if ((cur_offset % master->erasesize) != 0) {
+-				/* Round up to next erasesize */
+-				slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
+-				printk(KERN_NOTICE "Moving partition %d: "
+-				       "0x%08x -> 0x%08x\n", i,
+-				       cur_offset, slave->offset);
++				kfree(part);
+ 			}
+-		}
+-		if (slave->mtd.size == MTDPART_SIZ_FULL)
+-			slave->mtd.size = master->size - slave->offset;
+-		cur_offset = slave->offset + slave->mtd.size;
+-
+-		printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
+-			slave->offset + slave->mtd.size, slave->mtd.name);
+-
+-		/* let's do some sanity checks */
+-		if (slave->offset >= master->size) {
+-				/* let's register it anyway to preserve ordering */
+-			slave->offset = 0;
+-			slave->mtd.size = 0;
+-			printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
+-				parts[i].name);
+-		}
+-		if (slave->offset + slave->mtd.size > master->size) {
+-			slave->mtd.size = master->size - slave->offset;
+-			printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
+-				parts[i].name, master->name, slave->mtd.size);
+-		}
+-		if (master->numeraseregions>1) {
+-			/* Deal with variable erase size stuff */
+-			int i;
+-			struct mtd_erase_region_info *regions = master->eraseregions;
+-
+-			/* Find the first erase regions which is part of this partition. */
+-			for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
+-				;
+-
+-			for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
+-				if (slave->mtd.erasesize < regions[i].erasesize) {
+-					slave->mtd.erasesize = regions[i].erasesize;
+-				}
+-			}
+-		} else {
+-			/* Single erase size */
+-			slave->mtd.erasesize = master->erasesize;
+-		}
+-
+-		if ((slave->mtd.flags & MTD_WRITEABLE) &&
+-		    (slave->offset % slave->mtd.erasesize)) {
+-			/* Doesn't start on a boundary of major erase size */
+-			/* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
+-			slave->mtd.flags &= ~MTD_WRITEABLE;
+-			printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+-				parts[i].name);
+-		}
+-		if ((slave->mtd.flags & MTD_WRITEABLE) &&
+-		    (slave->mtd.size % slave->mtd.erasesize)) {
+-			slave->mtd.flags &= ~MTD_WRITEABLE;
+-			printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+-				parts[i].name);
+-		}
+-
+-		slave->mtd.ecclayout = master->ecclayout;
+-		if (master->block_isbad) {
+-			uint32_t offs = 0;
+-
+-			while(offs < slave->mtd.size) {
+-				if (master->block_isbad(master,
+-							offs + slave->offset))
+-					slave->mtd.ecc_stats.badblocks++;
+-				offs += slave->mtd.erasesize;
+-			}
+-		}
+-
+-		if(parts[i].mtdp)
+-		{	/* store the object pointer (caller may or may not register it */
+-			*parts[i].mtdp = &slave->mtd;
+-			slave->registered = 0;
+-		}
+-		else
+-		{
+-			/* register our partition */
+-			add_mtd_device(&slave->mtd);
+-			slave->registered = 1;
++#endif /* CONFIG_MTD_SPLIT_ROOTFS */
++			ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, i);
+ 		}
+ 	}
+ 
diff --git a/target/linux/generic-2.6/patches-2.6.22/065-block2mtd_init.patch b/target/linux/generic-2.6/patches-2.6.22/065-block2mtd_init.patch
new file mode 100644
index 0000000000..c653c6a3f3
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/065-block2mtd_init.patch
@@ -0,0 +1,113 @@
+diff -urN linux-2.6.21.1.old/drivers/mtd/devices/block2mtd.c linux-2.6.21.1.dev/drivers/mtd/devices/block2mtd.c
+--- linux-2.6.21.1.old/drivers/mtd/devices/block2mtd.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/drivers/mtd/devices/block2mtd.c	2007-05-26 20:06:13.547923960 +0200
+@@ -16,6 +16,7 @@
+ #include <linux/list.h>
+ #include <linux/init.h>
+ #include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
+ #include <linux/buffer_head.h>
+ #include <linux/mutex.h>
+ #include <linux/mount.h>
+@@ -237,10 +238,11 @@
+ 
+ 
+ /* FIXME: ensure that mtd->size % erase_size == 0 */
+-static struct block2mtd_dev *add_device(char *devname, int erase_size)
++static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
+ {
+ 	struct block_device *bdev;
+ 	struct block2mtd_dev *dev;
++	struct mtd_partition *part;
+ 
+ 	if (!devname)
+ 		return NULL;
+@@ -279,14 +281,18 @@
+ 
+ 	/* Setup the MTD structure */
+ 	/* make the name contain the block device in */
+-	dev->mtd.name = kmalloc(sizeof("block2mtd: ") + strlen(devname),
+-			GFP_KERNEL);
++
++	if (!mtdname)
++		mtdname = devname;
++
++	dev->mtd.name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL); /* +1 for the NUL copied by strcpy() below */
++
+ 	if (!dev->mtd.name)
+ 		goto devinit_err;
++	
++	strcpy(dev->mtd.name, mtdname);
+ 
+-	sprintf(dev->mtd.name, "block2mtd: %s", devname);
+-
+-	dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
++	dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
+ 	dev->mtd.erasesize = erase_size;
+ 	dev->mtd.writesize = 1;
+ 	dev->mtd.type = MTD_RAM;
+@@ -298,15 +304,18 @@
+ 	dev->mtd.read = block2mtd_read;
+ 	dev->mtd.priv = dev;
+ 	dev->mtd.owner = THIS_MODULE;
+-
+-	if (add_mtd_device(&dev->mtd)) {
++	
++	part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
++	part->name = dev->mtd.name;
++	part->offset = 0;
++	part->size = dev->mtd.size;
++	if (add_mtd_partitions(&dev->mtd, part, 1)) {
+ 		/* Device didnt get added, so free the entry */
+ 		goto devinit_err;
+ 	}
+ 	list_add(&dev->list, &blkmtd_device_list);
+ 	INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
+-			dev->mtd.name + strlen("blkmtd: "),
+-			dev->mtd.erasesize >> 10, dev->mtd.erasesize);
++			mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ 	return dev;
+ 
+ devinit_err:
+@@ -379,9 +388,9 @@
+ 
+ static int block2mtd_setup2(const char *val)
+ {
+-	char buf[80 + 12]; /* 80 for device, 12 for erase size */
++	char buf[80 + 12 + 80]; /* 80 for device, 12 for erase size, 80 for name */
+ 	char *str = buf;
+-	char *token[2];
++	char *token[3];
+ 	char *name;
+ 	size_t erase_size = PAGE_SIZE;
+ 	int i, ret;
+@@ -392,7 +401,7 @@
+ 	strcpy(str, val);
+ 	kill_final_newline(str);
+ 
+-	for (i = 0; i < 2; i++)
++	for (i = 0; i < 3; i++)
+ 		token[i] = strsep(&str, ",");
+ 
+ 	if (str)
+@@ -412,8 +421,10 @@
+ 			parse_err("illegal erase size");
+ 		}
+ 	}
++	if (token[2] && (strlen(token[2]) + 1 > 80))
++		parse_err("mtd device name too long");
+ 
+-	add_device(name, erase_size);
++	add_device(name, erase_size, token[2]);
+ 
+ 	return 0;
+ }
+@@ -447,7 +458,7 @@
+ 
+ 
+ module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
+-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
++MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
+ 
+ static int __init block2mtd_init(void)
+ {
diff --git a/target/linux/generic-2.6/patches-2.6.22/100-netfilter_layer7_2.8.patch b/target/linux/generic-2.6/patches-2.6.22/100-netfilter_layer7_2.8.patch
new file mode 100644
index 0000000000..caccaa7e54
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/100-netfilter_layer7_2.8.patch
@@ -0,0 +1,2034 @@
+diff -urN linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_layer7.h linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_layer7.h
+--- linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_layer7.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_layer7.h	2007-05-26 20:13:52.648130120 +0200
+@@ -0,0 +1,26 @@
++/*
++  By Matthew Strait <quadong@users.sf.net>, Dec 2003.
++  http://l7-filter.sf.net
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version
++  2 of the License, or (at your option) any later version.
++  http://www.gnu.org/licenses/gpl.txt
++*/
++
++#ifndef _IPT_LAYER7_H
++#define _IPT_LAYER7_H
++
++#define MAX_PATTERN_LEN 8192
++#define MAX_PROTOCOL_LEN 256
++
++typedef char *(*proc_ipt_search) (char *, char, char *);
++
++struct ipt_layer7_info {
++    char protocol[MAX_PROTOCOL_LEN];
++    char invert:1;
++    char pattern[MAX_PATTERN_LEN];
++};
++
++#endif /* _IPT_LAYER7_H */
+diff -urN linux-2.6.21.1.old/net/netfilter/nf_conntrack_core.c linux-2.6.21.1.dev/net/netfilter/nf_conntrack_core.c
+--- linux-2.6.21.1.old/net/netfilter/nf_conntrack_core.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/netfilter/nf_conntrack_core.c	2007-05-26 20:13:52.649129968 +0200
+@@ -330,6 +330,13 @@
+ 	 * too. */
+ 	nf_ct_remove_expectations(ct);
+ 
++	#if defined(CONFIG_IP_NF_MATCH_LAYER7) || defined(CONFIG_IP_NF_MATCH_LAYER7_MODULE)
++	if(ct->layer7.app_proto)
++		kfree(ct->layer7.app_proto);
++	if(ct->layer7.app_data)
++		kfree(ct->layer7.app_data);
++	#endif
++
+ 	/* We overload first tuple to link into unconfirmed list. */
+ 	if (!nf_ct_is_confirmed(ct)) {
+ 		BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list));
+diff -urN linux-2.6.21.1.old/net/netfilter/nf_conntrack_standalone.c linux-2.6.21.1.dev/net/netfilter/nf_conntrack_standalone.c
+--- linux-2.6.21.1.old/net/netfilter/nf_conntrack_standalone.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/netfilter/nf_conntrack_standalone.c	2007-05-26 20:13:52.649129968 +0200
+@@ -184,6 +184,12 @@
+ 		return -ENOSPC;
+ #endif
+ 
++#if defined(CONFIG_IP_NF_MATCH_LAYER7) || defined(CONFIG_IP_NF_MATCH_LAYER7_MODULE)
++	if(conntrack->layer7.app_proto)
++		if (seq_printf(s, "l7proto=%s ",conntrack->layer7.app_proto))
++			return 1;
++#endif
++
+ 	if (seq_printf(s, "use=%u\n", atomic_read(&conntrack->ct_general.use)))
+ 		return -ENOSPC;
+ 	
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/ipt_layer7.c linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_layer7.c
+--- linux-2.6.21.1.old/net/ipv4/netfilter/ipt_layer7.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_layer7.c	2007-05-26 20:13:52.650129816 +0200
+@@ -0,0 +1,573 @@
++/*
++  Kernel module to match application layer (OSI layer 7) data in connections.
++
++  http://l7-filter.sf.net
++
++  By Matthew Strait and Ethan Sommer, 2003-2006.
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version
++  2 of the License, or (at your option) any later version.
++  http://www.gnu.org/licenses/gpl.txt
++
++  Based on ipt_string.c (C) 2000 Emmanuel Roger <winfield@freegates.be>
++  and cls_layer7.c (C) 2003 Matthew Strait, Ethan Sommer, Justin Levandoski
++*/
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_conntrack.h>
++#include <linux/proc_fs.h>
++#include <linux/ctype.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <linux/spinlock.h>
++
++#include "regexp/regexp.c"
++
++#include <linux/netfilter_ipv4/ipt_layer7.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++
++MODULE_AUTHOR("Matthew Strait <quadong@users.sf.net>, Ethan Sommer <sommere@users.sf.net>");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("iptables application layer match module");
++MODULE_VERSION("2.0");
++
++static int maxdatalen = 2048; // this is the default
++module_param(maxdatalen, int, 0444);
++MODULE_PARM_DESC(maxdatalen, "maximum bytes of data looked at by l7-filter");
++
++#ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
++	#define DPRINTK(format,args...) printk(format,##args)
++#else
++	#define DPRINTK(format,args...)
++#endif
++
++#define TOTAL_PACKETS master_conntrack->counters[IP_CT_DIR_ORIGINAL].packets + \
++		      master_conntrack->counters[IP_CT_DIR_REPLY].packets
++
++/* Number of packets whose data we look at.
++This can be modified through /proc/net/layer7_numpackets */
++static int num_packets = 10;
++
++static struct pattern_cache {
++	char * regex_string;
++	regexp * pattern;
++	struct pattern_cache * next;
++} * first_pattern_cache = NULL;
++
++/* I'm new to locking.  Here are my assumptions:
++
++- No one will write to /proc/net/layer7_numpackets over and over very fast;
++  if they did, nothing awful would happen.
++
++- This code will never be processing the same packet twice at the same time,
++  because iptables rules are traversed in order.
++
++- It doesn't matter if two packets from different connections are in here at
++  the same time, because they don't share any data.
++
++- It _does_ matter if two packets from the same connection (or one from a
++  master and one from its child) are here at the same time.  In this case,
++  we have to protect the conntracks and the list of compiled patterns.
++*/
++DEFINE_RWLOCK(ct_lock);
++DEFINE_SPINLOCK(list_lock);
++
++#ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
++/* Converts an unfriendly string into a friendly one by
++replacing unprintables with periods and all whitespace with " ". */
++static char * friendly_print(unsigned char * s)
++{
++	char * f = kmalloc(strlen(s) + 1, GFP_ATOMIC);
++	int i;
++
++	if(!f) {
++		if (net_ratelimit())
++			printk(KERN_ERR "layer7: out of memory in friendly_print, bailing.\n");
++		return NULL;
++	}
++
++	for(i = 0; i < strlen(s); i++){
++		if(isprint(s[i]) && s[i] < 128)	f[i] = s[i];
++		else if(isspace(s[i]))		f[i] = ' ';
++		else 				f[i] = '.';
++	}
++	f[i] = '\0';
++	return f;
++}
++
++static char dec2hex(int i)
++{
++	switch (i) {
++		case 0 ... 9:
++			return (char)(i + '0');
++			break;
++		case 10 ... 15:
++			return (char)(i - 10 + 'a');
++			break;
++		default:
++			if (net_ratelimit())
++				printk("Problem in dec2hex\n");
++			return '\0';
++	}
++}
++
++static char * hex_print(unsigned char * s)
++{
++	char * g = kmalloc(strlen(s)*3 + 1, GFP_ATOMIC);
++	int i;
++
++	if(!g) {
++	       if (net_ratelimit())
++			printk(KERN_ERR "layer7: out of memory in hex_print, bailing.\n");
++	       return NULL;
++	}
++
++	for(i = 0; i < strlen(s); i++) {
++		g[i*3    ] = dec2hex(s[i]/16);
++		g[i*3 + 1] = dec2hex(s[i]%16);
++		g[i*3 + 2] = ' ';
++	}
++	g[i*3] = '\0';
++
++	return g;
++}
++#endif // DEBUG
++
++/* Use instead of regcomp.  As we expect to be seeing the same regexps over and
++over again, it makes sense to cache the results. */
++static regexp * compile_and_cache(char * regex_string, char * protocol)
++{
++	struct pattern_cache * node               = first_pattern_cache;
++	struct pattern_cache * last_pattern_cache = first_pattern_cache;
++	struct pattern_cache * tmp;
++	unsigned int len;
++
++	while (node != NULL) {
++		if (!strcmp(node->regex_string, regex_string))
++		return node->pattern;
++
++		last_pattern_cache = node;/* points at the last non-NULL node */
++		node = node->next;
++	}
++
++	/* If we reach the end of the list, then we have not yet cached
++	   the pattern for this regex. Let's do that now.
++	   Be paranoid about running out of memory to avoid list corruption. */
++	tmp = kmalloc(sizeof(struct pattern_cache), GFP_ATOMIC);
++
++	if(!tmp) {
++		if (net_ratelimit())
++			printk(KERN_ERR "layer7: out of memory in compile_and_cache, bailing.\n");
++		return NULL;
++	}
++
++	tmp->regex_string  = kmalloc(strlen(regex_string) + 1, GFP_ATOMIC);
++	tmp->pattern       = kmalloc(sizeof(struct regexp),    GFP_ATOMIC);
++	tmp->next = NULL;
++
++	if(!tmp->regex_string || !tmp->pattern) {
++		if (net_ratelimit())
++			printk(KERN_ERR "layer7: out of memory in compile_and_cache, bailing.\n");
++		kfree(tmp->regex_string);
++		kfree(tmp->pattern);
++		kfree(tmp);
++		return NULL;
++	}
++
++	/* Ok.  The new node is all ready now. */
++	node = tmp;
++
++	if(first_pattern_cache == NULL) /* list is empty */
++		first_pattern_cache = node; /* make node the beginning */
++	else
++		last_pattern_cache->next = node; /* attach node to the end */
++
++	/* copy the string and compile the regex */
++	len = strlen(regex_string);
++	DPRINTK("About to compile this: \"%s\"\n", regex_string);
++	node->pattern = regcomp(regex_string, &len);
++	if ( !node->pattern ) {
++		if (net_ratelimit())
++			printk(KERN_ERR "layer7: Error compiling regexp \"%s\" (%s)\n", regex_string, protocol);
++		/* pattern is now cached as NULL, so we won't try again. */
++	}
++
++	strcpy(node->regex_string, regex_string);
++	return node->pattern;
++}
++
++static int can_handle(const struct sk_buff *skb)
++{
++	if(!ip_hdr(skb)) /* not IP */
++		return 0;
++	if(ip_hdr(skb)->protocol != IPPROTO_TCP &&
++	   ip_hdr(skb)->protocol != IPPROTO_UDP &&
++	   ip_hdr(skb)->protocol != IPPROTO_ICMP)
++		return 0;
++	return 1;
++}
++
++/* Returns the offset into the skb->data at which the application data starts */
++static int app_data_offset(const struct sk_buff *skb)
++{
++	/* In case we are ported somewhere (ebtables?) where ip_hdr(skb)
++	isn't set, this can be gotten from 4*(skb->data[0] & 0x0f) as well. */
++	int ip_hl = ip_hdrlen(skb);
++
++	if( ip_hdr(skb)->protocol == IPPROTO_TCP ) {
++		/* 12 == offset into TCP header for the header length field.
++		Can't get this with skb->h.th->doff because the tcphdr
++		struct doesn't get set when routing (this is confirmed to be
++		true in Netfilter as well as QoS.) */
++		int tcp_hl = 4*(skb->data[ip_hl + 12] >> 4);
++
++		return ip_hl + tcp_hl;
++	} else if( ip_hdr(skb)->protocol == IPPROTO_UDP  ) {
++		return ip_hl + 8; /* UDP header is always 8 bytes */
++	} else if( ip_hdr(skb)->protocol == IPPROTO_ICMP ) {
++		return ip_hl + 8; /* ICMP header is 8 bytes */
++	} else {
++		if (net_ratelimit())
++			printk(KERN_ERR "layer7: tried to handle unknown protocol!\n");
++		return ip_hl + 8; /* something reasonable */
++	}
++}
++
++/* handles whether there's a match when we aren't appending data anymore */
++static int match_no_append(struct ip_conntrack * conntrack, struct ip_conntrack * master_conntrack,
++			enum ip_conntrack_info ctinfo, enum ip_conntrack_info master_ctinfo,
++			struct ipt_layer7_info * info)
++{
++	/* If we're in here, throw the app data away */
++	write_lock(&ct_lock);
++	if(master_conntrack->layer7.app_data != NULL) {
++
++	#ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
++		if(!master_conntrack->layer7.app_proto) {
++			char * f = friendly_print(master_conntrack->layer7.app_data);
++			char * g = hex_print(master_conntrack->layer7.app_data);
++			DPRINTK("\nl7-filter gave up after %d bytes (%d packets):\n%s\n",
++				strlen(f), TOTAL_PACKETS, f);
++			kfree(f);
++			DPRINTK("In hex: %s\n", g);
++			kfree(g);
++		}
++	#endif
++
++		kfree(master_conntrack->layer7.app_data);
++		master_conntrack->layer7.app_data = NULL; /* don't free again */
++	}
++	write_unlock(&ct_lock);
++
++	if(master_conntrack->layer7.app_proto){
++		/* Here child connections set their .app_proto (for /proc/net/ip_conntrack) */
++		write_lock(&ct_lock);
++		if(!conntrack->layer7.app_proto) {
++			conntrack->layer7.app_proto = kmalloc(strlen(master_conntrack->layer7.app_proto)+1, GFP_ATOMIC);
++			if(!conntrack->layer7.app_proto){
++				if (net_ratelimit())
++					printk(KERN_ERR "layer7: out of memory in match_no_append, bailing.\n");
++				write_unlock(&ct_lock);
++				return 1;
++			}
++			strcpy(conntrack->layer7.app_proto, master_conntrack->layer7.app_proto);
++		}
++		write_unlock(&ct_lock);
++
++		return (!strcmp(master_conntrack->layer7.app_proto, info->protocol));
++	}
++	else {
++		/* If not classified, set to "unknown" to distinguish from
++		connections that are still being tested. */
++		write_lock(&ct_lock);
++		master_conntrack->layer7.app_proto = kmalloc(strlen("unknown")+1, GFP_ATOMIC);
++		if(!master_conntrack->layer7.app_proto){
++			if (net_ratelimit())
++				printk(KERN_ERR "layer7: out of memory in match_no_append, bailing.\n");
++			write_unlock(&ct_lock);
++			return 1;
++		}
++		strcpy(master_conntrack->layer7.app_proto, "unknown");
++		write_unlock(&ct_lock);
++		return 0;
++	}
++}
++
++/* add the new app data to the conntrack.  Return number of bytes added. */
++static int add_data(struct ip_conntrack * master_conntrack,
++			char * app_data, int appdatalen)
++{
++	int length = 0, i;
++	int oldlength = master_conntrack->layer7.app_data_len;
++
++	// This is a fix for a race condition by Deti Fliegl. However, I'm not 
++	// clear on whether the race condition exists or whether this really 
++	// fixes it.  I might just be being dense... Anyway, if it's not really 
++	// a fix, all it does is waste a very small amount of time.
++	if(!master_conntrack->layer7.app_data) return 0;
++
++	/* Strip nulls. Make everything lower case (our regex lib doesn't
++	do case insensitivity).  Add it to the end of the current data. */
++	for(i = 0; i < maxdatalen-oldlength-1 &&
++		   i < appdatalen; i++) {
++		if(app_data[i] != '\0') {
++			master_conntrack->layer7.app_data[length+oldlength] =
++				/* the kernel version of tolower mungs 'upper ascii' */
++				isascii(app_data[i])? tolower(app_data[i]) : app_data[i];
++			length++;
++		}
++	}
++
++	master_conntrack->layer7.app_data[length+oldlength] = '\0';
++	master_conntrack->layer7.app_data_len = length + oldlength;
++
++	return length;
++}
++
++/* Returns true on match and false otherwise.  */
++static int match(const struct sk_buff *skbin,
++	const struct net_device *in, const struct net_device *out,
++	const struct xt_match *match, const void *matchinfo,
++	int offset, unsigned int protoff, int *hotdrop)
++{
++	/* sidestep const without getting a compiler warning... */
++	struct sk_buff * skb = (struct sk_buff *)skbin; 
++
++	struct ipt_layer7_info * info = (struct ipt_layer7_info *)matchinfo;
++	enum ip_conntrack_info master_ctinfo, ctinfo;
++	struct ip_conntrack *master_conntrack, *conntrack;
++	unsigned char * app_data;
++	unsigned int pattern_result, appdatalen;
++	regexp * comppattern;
++
++	if(!can_handle(skb)){
++		DPRINTK("layer7: This is some protocol I can't handle.\n");
++		return info->invert;
++	}
++
++	/* Treat parent & all its children together as one connection, except
++	for the purpose of setting conntrack->layer7.app_proto in the actual
++	connection. This makes /proc/net/ip_conntrack more satisfying. */
++	if(!(conntrack = ip_conntrack_get((struct sk_buff *)skb, &ctinfo)) ||
++	   !(master_conntrack = ip_conntrack_get((struct sk_buff *)skb, &master_ctinfo))) {
++		//DPRINTK("layer7: packet is not from a known connection, giving up.\n");
++		return info->invert;
++	}
++
++	/* Try to get a master conntrack (and its master etc) for FTP, etc. */
++	while (master_ct(master_conntrack) != NULL)
++		master_conntrack = master_ct(master_conntrack);
++
++	/* if we've classified it or seen too many packets */
++	if(TOTAL_PACKETS > num_packets ||
++	   master_conntrack->layer7.app_proto) {
++
++		pattern_result = match_no_append(conntrack, master_conntrack, ctinfo, master_ctinfo, info);
++
++		/* skb->cb[0] == seen. Avoid doing things twice if there are two l7
++		rules. I'm not sure that using cb for this purpose is correct, although
++		it says "put your private variables there". But it doesn't look like it
++		is being used for anything else in the skbs that make it here. How can
++		I write to cb without making the compiler angry? */
++		skb->cb[0] = 1; /* marking it seen here is probably irrelevant, but consistent */
++
++		return (pattern_result ^ info->invert);
++	}
++
++	if(skb_is_nonlinear(skb)){
++		if(skb_linearize(skb) != 0){
++			if (net_ratelimit())
++				printk(KERN_ERR "layer7: failed to linearize packet, bailing.\n");
++			return info->invert;
++		}
++	}
++
++	/* now that the skb is linearized, it's safe to set these. */
++	app_data = skb->data + app_data_offset(skb);
++	appdatalen = skb->tail - app_data;
++
++	spin_lock_bh(&list_lock);
++	/* the return value gets checked later, when we're ready to use it */
++	comppattern = compile_and_cache(info->pattern, info->protocol);
++	spin_unlock_bh(&list_lock);
++
++	/* On the first packet of a connection, allocate space for app data */
++	write_lock(&ct_lock);
++	if(TOTAL_PACKETS == 1 && !skb->cb[0] && !master_conntrack->layer7.app_data) {
++		master_conntrack->layer7.app_data = kmalloc(maxdatalen, GFP_ATOMIC);
++		if(!master_conntrack->layer7.app_data){
++			if (net_ratelimit())
++				printk(KERN_ERR "layer7: out of memory in match, bailing.\n");
++			write_unlock(&ct_lock);
++			return info->invert;
++		}
++
++		master_conntrack->layer7.app_data[0] = '\0';
++	}
++	write_unlock(&ct_lock);
++
++	/* Can be here, but unallocated, if numpackets is increased near
++	the beginning of a connection */
++	if(master_conntrack->layer7.app_data == NULL)
++		return (info->invert); /* unmatched */
++
++	if(!skb->cb[0]){
++		int newbytes;
++		write_lock(&ct_lock);
++		newbytes = add_data(master_conntrack, app_data, appdatalen);
++		write_unlock(&ct_lock);
++
++		if(newbytes == 0) { /* didn't add any data */
++			skb->cb[0] = 1;
++			/* Didn't match before, not going to match now */
++			return info->invert;
++		}
++	}
++
++	/* If looking for "unknown", then never match.  "Unknown" means that
++	we've given up; we're still trying with these packets. */
++	read_lock(&ct_lock);
++	if(!strcmp(info->protocol, "unknown")) {
++		pattern_result = 0;
++	/* If the regexp failed to compile, don't bother running it */
++	} else if(comppattern && regexec(comppattern, master_conntrack->layer7.app_data)) {
++		DPRINTK("layer7: matched %s\n", info->protocol);
++		pattern_result = 1;
++	} else pattern_result = 0;
++	read_unlock(&ct_lock);
++
++	if(pattern_result) {
++		write_lock(&ct_lock);
++		master_conntrack->layer7.app_proto = kmalloc(strlen(info->protocol)+1, GFP_ATOMIC);
++		if(!master_conntrack->layer7.app_proto){
++			if (net_ratelimit())
++				printk(KERN_ERR "layer7: out of memory in match, bailing.\n");
++			write_unlock(&ct_lock);
++			return (pattern_result ^ info->invert);
++		}
++		strcpy(master_conntrack->layer7.app_proto, info->protocol);
++		write_unlock(&ct_lock);
++	}
++
++	/* mark the packet seen */
++	skb->cb[0] = 1;
++
++	return (pattern_result ^ info->invert);
++}
++
++static struct ipt_match layer7_match = {
++	.name = "layer7",
++	.match = &match,
++	.matchsize  = sizeof(struct ipt_layer7_info),
++	.me = THIS_MODULE
++};
++
++/* taken from drivers/video/modedb.c */
++static int my_atoi(const char *s)
++{
++	int val = 0;
++
++	for (;; s++) {
++		switch (*s) {
++			case '0'...'9':
++			val = 10*val+(*s-'0');
++			break;
++		default:
++			return val;
++		}
++	}
++}
++
++/* write out num_packets to userland. */
++static int layer7_read_proc(char* page, char ** start, off_t off, int count,
++		     int* eof, void * data)
++{
++	if(num_packets > 99 && net_ratelimit())
++		printk(KERN_ERR "layer7: NOT REACHED. num_packets too big\n");
++
++	page[0] = num_packets/10 + '0';
++	page[1] = num_packets%10 + '0';
++	page[2] = '\n';
++	page[3] = '\0';
++
++	*eof=1;
++
++	return 3;
++}
++
++/* Read in num_packets from userland */
++static int layer7_write_proc(struct file* file, const char* buffer,
++		      unsigned long count, void *data)
++{
++	char * foo = kmalloc(count, GFP_ATOMIC);
++
++	if(!foo){
++		if (net_ratelimit())
++			printk(KERN_ERR "layer7: out of memory, bailing. num_packets unchanged.\n");
++		return count;
++	}
++
++	if(copy_from_user(foo, buffer, count)) {
++		return -EFAULT;
++	}
++
++
++	num_packets = my_atoi(foo);
++	kfree (foo);
++
++	/* This has an arbitrary limit to make the math easier. I'm lazy.
++	But anyway, 99 is a LOT! If you want more, you're doing it wrong! */
++	if(num_packets > 99) {
++		printk(KERN_WARNING "layer7: num_packets can't be > 99.\n");
++		num_packets = 99;
++	} else if(num_packets < 1) {
++		printk(KERN_WARNING "layer7: num_packets can't be < 1.\n");
++		num_packets = 1;
++	}
++
++	return count;
++}
++
++/* register the proc file */
++static void layer7_init_proc(void)
++{
++	struct proc_dir_entry* entry;
++	entry = create_proc_entry("layer7_numpackets", 0644, proc_net);
++	entry->read_proc = layer7_read_proc;
++	entry->write_proc = layer7_write_proc;
++}
++
++static void layer7_cleanup_proc(void)
++{
++	remove_proc_entry("layer7_numpackets", proc_net);
++}
++
++static int __init ipt_layer7_init(void)
++{
++	need_conntrack();
++
++	layer7_init_proc();
++	if(maxdatalen < 1) {
++		printk(KERN_WARNING "layer7: maxdatalen can't be < 1, using 1\n");
++		maxdatalen = 1;
++	}
++	/* This is not a hard limit.  It's just here to prevent people from
++	bringing their slow machines to a grinding halt. */
++	else if(maxdatalen > 65536) {
++		printk(KERN_WARNING "layer7: maxdatalen can't be > 65536, using 65536\n");
++		maxdatalen = 65536;
++	}
++	return ipt_register_match(&layer7_match);
++}
++
++static void __exit ipt_layer7_fini(void)
++{
++	layer7_cleanup_proc();
++	ipt_unregister_match(&layer7_match);
++}
++
++module_init(ipt_layer7_init);
++module_exit(ipt_layer7_fini);
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/Kconfig linux-2.6.21.1.dev/net/ipv4/netfilter/Kconfig
+--- linux-2.6.21.1.old/net/ipv4/netfilter/Kconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/Kconfig	2007-05-26 20:13:52.650129816 +0200
+@@ -63,6 +63,24 @@
+ 
+ 	  To compile it as a module, choose M here.  If unsure, say N.
+ 
++config IP_NF_MATCH_LAYER7
++	tristate "Layer 7 match support (EXPERIMENTAL)"
++	depends on IP_NF_IPTABLES && IP_NF_CT_ACCT && IP_NF_CONNTRACK && EXPERIMENTAL
++	help
++	  Say Y if you want to be able to classify connections (and their
++	  packets) based on regular expression matching of their application
++	  layer data.   This is one way to classify applications such as
++	  peer-to-peer filesharing systems that do not always use the same
++	  port.
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
++config IP_NF_MATCH_LAYER7_DEBUG
++	bool "Layer 7 debugging output"
++	depends on IP_NF_MATCH_LAYER7
++	help
++	  Say Y to get lots of debugging output.
++
+ config IP_NF_MATCH_TOS
+ 	tristate "TOS match support"
+ 	depends on IP_NF_IPTABLES
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/Makefile linux-2.6.21.1.dev/net/ipv4/netfilter/Makefile
+--- linux-2.6.21.1.old/net/ipv4/netfilter/Makefile	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/Makefile	2007-05-26 20:13:52.651129664 +0200
+@@ -50,6 +50,8 @@
+ obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
+ obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
+ 
++obj-$(CONFIG_IP_NF_MATCH_LAYER7) += ipt_layer7.o
++
+ # targets
+ obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
+ obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regexp.c linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regexp.c
+--- linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regexp.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regexp.c	2007-05-26 20:13:52.652129512 +0200
+@@ -0,0 +1,1197 @@
++/*
++ * regcomp and regexec -- regsub and regerror are elsewhere
++ * @(#)regexp.c	1.3 of 18 April 87
++ *
++ *	Copyright (c) 1986 by University of Toronto.
++ *	Written by Henry Spencer.  Not derived from licensed software.
++ *
++ *	Permission is granted to anyone to use this software for any
++ *	purpose on any computer system, and to redistribute it freely,
++ *	subject to the following restrictions:
++ *
++ *	1. The author is not responsible for the consequences of use of
++ *		this software, no matter how awful, even if they arise
++ *		from defects in it.
++ *
++ *	2. The origin of this software must not be misrepresented, either
++ *		by explicit claim or by omission.
++ *
++ *	3. Altered versions must be plainly marked as such, and must not
++ *		be misrepresented as being the original software.
++ *
++ * Beware that some of this code is subtly aware of the way operator
++ * precedence is structured in regular expressions.  Serious changes in
++ * regular-expression syntax might require a total rethink.
++ *
++ * This code was modified by Ethan Sommer to work within the kernel
++ * (it now uses kmalloc etc..)
++ *
++ * Modified slightly by Matthew Strait to use more modern C.
++ */
++
++#include "regexp.h"
++#include "regmagic.h"
++
++/* added by ethan and matt.  Lets it work in both kernel and user space.
++(So iptables can use it, for instance.)  Yea, it goes both ways... */
++#if __KERNEL__
++  #define malloc(foo) kmalloc(foo,GFP_ATOMIC)
++#else
++  #define printk(format,args...) printf(format,##args)
++#endif
++
++void regerror(char * s)
++{
++        printk("<3>Regexp: %s\n", s);
++        /* NOTREACHED */
++}
++
++/*
++ * The "internal use only" fields in regexp.h are present to pass info from
++ * compile to execute that permits the execute phase to run lots faster on
++ * simple cases.  They are:
++ *
++ * regstart	char that must begin a match; '\0' if none obvious
++ * reganch	is the match anchored (at beginning-of-line only)?
++ * regmust	string (pointer into program) that match must include, or NULL
++ * regmlen	length of regmust string
++ *
++ * Regstart and reganch permit very fast decisions on suitable starting points
++ * for a match, cutting down the work a lot.  Regmust permits fast rejection
++ * of lines that cannot possibly match.  The regmust tests are costly enough
++ * that regcomp() supplies a regmust only if the r.e. contains something
++ * potentially expensive (at present, the only such thing detected is * or +
++ * at the start of the r.e., which can involve a lot of backup).  Regmlen is
++ * supplied because the test in regexec() needs it and regcomp() is computing
++ * it anyway.
++ */
++
++/*
++ * Structure for regexp "program".  This is essentially a linear encoding
++ * of a nondeterministic finite-state machine (aka syntax charts or
++ * "railroad normal form" in parsing technology).  Each node is an opcode
++ * plus a "next" pointer, possibly plus an operand.  "Next" pointers of
++ * all nodes except BRANCH implement concatenation; a "next" pointer with
++ * a BRANCH on both ends of it is connecting two alternatives.  (Here we
++ * have one of the subtle syntax dependencies:  an individual BRANCH (as
++ * opposed to a collection of them) is never concatenated with anything
++ * because of operator precedence.)  The operand of some types of node is
++ * a literal string; for others, it is a node leading into a sub-FSM.  In
++ * particular, the operand of a BRANCH node is the first node of the branch.
++ * (NB this is *not* a tree structure:  the tail of the branch connects
++ * to the thing following the set of BRANCHes.)  The opcodes are:
++ */
++
++/* definition	number	opnd?	meaning */
++#define	END	0	/* no	End of program. */
++#define	BOL	1	/* no	Match "" at beginning of line. */
++#define	EOL	2	/* no	Match "" at end of line. */
++#define	ANY	3	/* no	Match any one character. */
++#define	ANYOF	4	/* str	Match any character in this string. */
++#define	ANYBUT	5	/* str	Match any character not in this string. */
++#define	BRANCH	6	/* node	Match this alternative, or the next... */
++#define	BACK	7	/* no	Match "", "next" ptr points backward. */
++#define	EXACTLY	8	/* str	Match this string. */
++#define	NOTHING	9	/* no	Match empty string. */
++#define	STAR	10	/* node	Match this (simple) thing 0 or more times. */
++#define	PLUS	11	/* node	Match this (simple) thing 1 or more times. */
++#define	OPEN	20	/* no	Mark this point in input as start of #n. */
++			/*	OPEN+1 is number 1, etc. */
++#define	CLOSE	30	/* no	Analogous to OPEN. */
++
++/*
++ * Opcode notes:
++ *
++ * BRANCH	The set of branches constituting a single choice are hooked
++ *		together with their "next" pointers, since precedence prevents
++ *		anything being concatenated to any individual branch.  The
++ *		"next" pointer of the last BRANCH in a choice points to the
++ *		thing following the whole choice.  This is also where the
++ *		final "next" pointer of each individual branch points; each
++ *		branch starts with the operand node of a BRANCH node.
++ *
++ * BACK		Normal "next" pointers all implicitly point forward; BACK
++ *		exists to make loop structures possible.
++ *
++ * STAR,PLUS	'?', and complex '*' and '+', are implemented as circular
++ *		BRANCH structures using BACK.  Simple cases (one character
++ *		per match) are implemented with STAR and PLUS for speed
++ *		and to minimize recursive plunges.
++ *
++ * OPEN,CLOSE	...are numbered at compile time.
++ */
++
++/*
++ * A node is one char of opcode followed by two chars of "next" pointer.
++ * "Next" pointers are stored as two 8-bit pieces, high order first.  The
++ * value is a positive offset from the opcode of the node containing it.
++ * An operand, if any, simply follows the node.  (Note that much of the
++ * code generation knows about this implicit relationship.)
++ *
++ * Using two bytes for the "next" pointer is vast overkill for most things,
++ * but allows patterns to get big without disasters.
++ */
++#define	OP(p)	(*(p))
++#define	NEXT(p)	(((*((p)+1)&0377)<<8) + (*((p)+2)&0377))
++#define	OPERAND(p)	((p) + 3)
++
++/*
++ * See regmagic.h for one further detail of program structure.
++ */
++
++
++/*
++ * Utility definitions.
++ */
++#ifndef CHARBITS
++#define	UCHARAT(p)	((int)*(unsigned char *)(p))
++#else
++#define	UCHARAT(p)	((int)*(p)&CHARBITS)
++#endif
++
++#define	FAIL(m)	{ regerror(m); return(NULL); }
++#define	ISMULT(c)	((c) == '*' || (c) == '+' || (c) == '?')
++#define	META	"^$.[()|?+*\\"
++
++/*
++ * Flags to be passed up and down.
++ */
++#define	HASWIDTH	01	/* Known never to match null string. */
++#define	SIMPLE		02	/* Simple enough to be STAR/PLUS operand. */
++#define	SPSTART		04	/* Starts with * or +. */
++#define	WORST		0	/* Worst case. */
++
++/*
++ * Global work variables for regcomp().
++ */
++struct match_globals {
++char *reginput;		/* String-input pointer. */
++char *regbol;		/* Beginning of input, for ^ check. */
++char **regstartp;	/* Pointer to startp array. */
++char **regendp;		/* Ditto for endp. */
++char *regparse;		/* Input-scan pointer. */
++int regnpar;		/* () count. */
++char regdummy;
++char *regcode;		/* Code-emit pointer; &regdummy = don't. */
++long regsize;		/* Code size. */
++};
++
++/*
++ * Forward declarations for regcomp()'s friends.
++ */
++#ifndef STATIC
++#define	STATIC	static
++#endif
++STATIC char *reg(struct match_globals *g, int paren,int *flagp);
++STATIC char *regbranch(struct match_globals *g, int *flagp);
++STATIC char *regpiece(struct match_globals *g, int *flagp);
++STATIC char *regatom(struct match_globals *g, int *flagp);
++STATIC char *regnode(struct match_globals *g, char op);
++STATIC char *regnext(struct match_globals *g, char *p);
++STATIC void regc(struct match_globals *g, char b);
++STATIC void reginsert(struct match_globals *g, char op, char *opnd);
++STATIC void regtail(struct match_globals *g, char *p, char *val);
++STATIC void regoptail(struct match_globals *g, char *p, char *val);
++
++
++__kernel_size_t my_strcspn(const char *s1,const char *s2)
++{
++        char *scan1;
++        char *scan2;
++        int count;
++
++        count = 0;
++        for (scan1 = (char *)s1; *scan1 != '\0'; scan1++) {
++                for (scan2 = (char *)s2; *scan2 != '\0';)       /* ++ moved down. */
++                        if (*scan1 == *scan2++)
++                                return(count);
++                count++;
++        }
++        return(count);
++}
++
++/*
++ - regcomp - compile a regular expression into internal code
++ *
++ * We can't allocate space until we know how big the compiled form will be,
++ * but we can't compile it (and thus know how big it is) until we've got a
++ * place to put the code.  So we cheat:  we compile it twice, once with code
++ * generation turned off and size counting turned on, and once "for real".
++ * This also means that we don't allocate space until we are sure that the
++ * thing really will compile successfully, and we never have to move the
++ * code and thus invalidate pointers into it.  (Note that it has to be in
++ * one piece because free() must be able to free it all.)
++ *
++ * Beware that the optimization-preparation code in here knows about some
++ * of the structure of the compiled regexp.
++ */
++regexp *
++regcomp(char *exp,int *patternsize)
++{
++	register regexp *r;
++	register char *scan;
++	register char *longest;
++	register int len;
++	int flags;
++	struct match_globals g;
++	
++	/* commented out by ethan
++	   extern char *malloc();
++	*/
++
++	if (exp == NULL)
++		FAIL("NULL argument");
++
++	/* First pass: determine size, legality. */
++	g.regparse = exp;
++	g.regnpar = 1;
++	g.regsize = 0L;
++	g.regcode = &g.regdummy;
++	regc(&g, MAGIC);
++	if (reg(&g, 0, &flags) == NULL)
++		return(NULL);
++
++	/* Small enough for pointer-storage convention? */
++	if (g.regsize >= 32767L)		/* Probably could be 65535L. */
++		FAIL("regexp too big");
++
++	/* Allocate space. */
++	*patternsize=sizeof(regexp) + (unsigned)g.regsize;
++	r = (regexp *)malloc(sizeof(regexp) + (unsigned)g.regsize);
++	if (r == NULL)
++		FAIL("out of space");
++
++	/* Second pass: emit code. */
++	g.regparse = exp;
++	g.regnpar = 1;
++	g.regcode = r->program;
++	regc(&g, MAGIC);
++	if (reg(&g, 0, &flags) == NULL)
++		return(NULL);
++
++	/* Dig out information for optimizations. */
++	r->regstart = '\0';	/* Worst-case defaults. */
++	r->reganch = 0;
++	r->regmust = NULL;
++	r->regmlen = 0;
++	scan = r->program+1;			/* First BRANCH. */
++	if (OP(regnext(&g, scan)) == END) {		/* Only one top-level choice. */
++		scan = OPERAND(scan);
++
++		/* Starting-point info. */
++		if (OP(scan) == EXACTLY)
++			r->regstart = *OPERAND(scan);
++		else if (OP(scan) == BOL)
++			r->reganch++;
++
++		/*
++		 * If there's something expensive in the r.e., find the
++		 * longest literal string that must appear and make it the
++		 * regmust.  Resolve ties in favor of later strings, since
++		 * the regstart check works with the beginning of the r.e.
++		 * and avoiding duplication strengthens checking.  Not a
++		 * strong reason, but sufficient in the absence of others.
++		 */
++		if (flags&SPSTART) {
++			longest = NULL;
++			len = 0;
++			for (; scan != NULL; scan = regnext(&g, scan))
++				if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) {
++					longest = OPERAND(scan);
++					len = strlen(OPERAND(scan));
++				}
++			r->regmust = longest;
++			r->regmlen = len;
++		}
++	}
++
++	return(r);
++}
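The two-pass trick regcomp() uses above -- count with regcode aimed at regdummy, then allocate once and emit for real -- is easier to see in isolation. A minimal sketch with invented names (emitter, emit, gen), not taken from the patch:

#include <stdio.h>
#include <stdlib.h>

struct emitter {
	char *out;	/* NULL during the sizing pass        */
	long size;	/* bytes the real pass will need      */
};

static void emit(struct emitter *e, char b)
{
	if (e->out)
		*e->out++ = b;
	else
		e->size++;
}

static void gen(struct emitter *e, const char *s)
{
	while (*s)
		emit(e, *s++);
	emit(e, '\0');
}

int main(void)
{
	struct emitter e = { NULL, 0 };
	char *buf;

	gen(&e, "abc");			/* pass 1: size only     */
	buf = malloc(e.size);
	if (buf == NULL)
		return 1;
	e.out = buf;
	gen(&e, "abc");			/* pass 2: emit for real */
	printf("allocated %ld bytes: %s\n", e.size, buf);
	free(buf);
	return 0;
}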
++
++/*
++ - reg - regular expression, i.e. main body or parenthesized thing
++ *
++ * Caller must absorb opening parenthesis.
++ *
++ * Combining parenthesis handling with the base level of regular expression
++ * is a trifle forced, but the need to tie the tails of the branches to what
++ * follows makes it hard to avoid.
++ */
++static char *
++reg(struct match_globals *g, int paren, int *flagp /* Parenthesized? */ )
++{
++	register char *ret;
++	register char *br;
++	register char *ender;
++	register int parno = 0; /* 0 makes gcc happy */
++	int flags;
++
++	*flagp = HASWIDTH;	/* Tentatively. */
++
++	/* Make an OPEN node, if parenthesized. */
++	if (paren) {
++		if (g->regnpar >= NSUBEXP)
++			FAIL("too many ()");
++		parno = g->regnpar;
++		g->regnpar++;
++		ret = regnode(g, OPEN+parno);
++	} else
++		ret = NULL;
++
++	/* Pick up the branches, linking them together. */
++	br = regbranch(g, &flags);
++	if (br == NULL)
++		return(NULL);
++	if (ret != NULL)
++		regtail(g, ret, br);	/* OPEN -> first. */
++	else
++		ret = br;
++	if (!(flags&HASWIDTH))
++		*flagp &= ~HASWIDTH;
++	*flagp |= flags&SPSTART;
++	while (*g->regparse == '|') {
++		g->regparse++;
++		br = regbranch(g, &flags);
++		if (br == NULL)
++			return(NULL);
++		regtail(g, ret, br);	/* BRANCH -> BRANCH. */
++		if (!(flags&HASWIDTH))
++			*flagp &= ~HASWIDTH;
++		*flagp |= flags&SPSTART;
++	}
++
++	/* Make a closing node, and hook it on the end. */
++	ender = regnode(g, (paren) ? CLOSE+parno : END);	
++	regtail(g, ret, ender);
++
++	/* Hook the tails of the branches to the closing node. */
++	for (br = ret; br != NULL; br = regnext(g, br))
++		regoptail(g, br, ender);
++
++	/* Check for proper termination. */
++	if (paren && *g->regparse++ != ')') {
++		FAIL("unmatched ()");
++	} else if (!paren && *g->regparse != '\0') {
++		if (*g->regparse == ')') {
++			FAIL("unmatched ()");
++		} else
++			FAIL("junk on end");	/* "Can't happen". */
++		/* NOTREACHED */
++	}
++
++	return(ret);
++}
++
++/*
++ - regbranch - one alternative of an | operator
++ *
++ * Implements the concatenation operator.
++ */
++static char *
++regbranch(struct match_globals *g, int *flagp)
++{
++	register char *ret;
++	register char *chain;
++	register char *latest;
++	int flags;
++
++	*flagp = WORST;		/* Tentatively. */
++
++	ret = regnode(g, BRANCH);
++	chain = NULL;
++	while (*g->regparse != '\0' && *g->regparse != '|' && *g->regparse != ')') {
++		latest = regpiece(g, &flags);
++		if (latest == NULL)
++			return(NULL);
++		*flagp |= flags&HASWIDTH;
++		if (chain == NULL)	/* First piece. */
++			*flagp |= flags&SPSTART;
++		else
++			regtail(g, chain, latest);
++		chain = latest;
++	}
++	if (chain == NULL)	/* Loop ran zero times. */
++		(void) regnode(g, NOTHING);
++
++	return(ret);
++}
++
++/*
++ - regpiece - something followed by possible [*+?]
++ *
++ * Note that the branching code sequences used for ? and the general cases
++ * of * and + are somewhat optimized:  they use the same NOTHING node as
++ * both the endmarker for their branch list and the body of the last branch.
++ * It might seem that this node could be dispensed with entirely, but the
++ * endmarker role is not redundant.
++ */
++static char *
++regpiece(struct match_globals *g, int *flagp)
++{
++	register char *ret;
++	register char op;
++	register char *next;
++	int flags;
++
++	ret = regatom(g, &flags);
++	if (ret == NULL)
++		return(NULL);
++
++	op = *g->regparse;
++	if (!ISMULT(op)) {
++		*flagp = flags;
++		return(ret);
++	}
++
++	if (!(flags&HASWIDTH) && op != '?')
++		FAIL("*+ operand could be empty");
++	*flagp = (op != '+') ? (WORST|SPSTART) : (WORST|HASWIDTH);
++
++	if (op == '*' && (flags&SIMPLE))
++		reginsert(g, STAR, ret);
++	else if (op == '*') {
++		/* Emit x* as (x&|), where & means "self". */
++		reginsert(g, BRANCH, ret);			/* Either x */
++		regoptail(g, ret, regnode(g, BACK));		/* and loop */
++		regoptail(g, ret, ret);			/* back */
++		regtail(g, ret, regnode(g, BRANCH));		/* or */
++		regtail(g, ret, regnode(g, NOTHING));		/* null. */
++	} else if (op == '+' && (flags&SIMPLE))
++		reginsert(g, PLUS, ret);
++	else if (op == '+') {
++		/* Emit x+ as x(&|), where & means "self". */
++		next = regnode(g, BRANCH);			/* Either */
++		regtail(g, ret, next);
++		regtail(g, regnode(g, BACK), ret);		/* loop back */
++		regtail(g, next, regnode(g, BRANCH));		/* or */
++		regtail(g, ret, regnode(g, NOTHING));		/* null. */
++	} else if (op == '?') {
++		/* Emit x? as (x|) */
++		reginsert(g, BRANCH, ret);			/* Either x */
++		regtail(g, ret, regnode(g, BRANCH));		/* or */
++		next = regnode(g, NOTHING);		/* null. */
++		regtail(g, ret, next);
++		regoptail(g, ret, next);
++	}
++	g->regparse++;
++	if (ISMULT(*g->regparse))
++		FAIL("nested *?+");
++
++	return(ret);
++}
++
++/*
++ - regatom - the lowest level
++ *
++ * Optimization:  gobbles an entire sequence of ordinary characters so that
++ * it can turn them into a single node, which is smaller to store and
++ * faster to run.  Backslashed characters are exceptions, each becoming a
++ * separate node; the code is simpler that way and it's not worth fixing.
++ */
++static char *
++regatom(struct match_globals *g, int *flagp)
++{
++	register char *ret;
++	int flags;
++
++	*flagp = WORST;		/* Tentatively. */
++
++	switch (*g->regparse++) {
++	case '^':
++		ret = regnode(g, BOL);
++		break;
++	case '$':
++		ret = regnode(g, EOL);
++		break;
++	case '.':
++		ret = regnode(g, ANY);
++		*flagp |= HASWIDTH|SIMPLE;
++		break;
++	case '[': {
++			register int class;
++			register int classend;
++
++			if (*g->regparse == '^') {	/* Complement of range. */
++				ret = regnode(g, ANYBUT);
++				g->regparse++;
++			} else
++				ret = regnode(g, ANYOF);
++			if (*g->regparse == ']' || *g->regparse == '-')
++				regc(g, *g->regparse++);
++			while (*g->regparse != '\0' && *g->regparse != ']') {
++				if (*g->regparse == '-') {
++					g->regparse++;
++					if (*g->regparse == ']' || *g->regparse == '\0')
++						regc(g, '-');
++					else {
++						class = UCHARAT(g->regparse-2)+1;
++						classend = UCHARAT(g->regparse);
++						if (class > classend+1)
++							FAIL("invalid [] range");
++						for (; class <= classend; class++)
++							regc(g, class);
++						g->regparse++;
++					}
++				} else
++					regc(g, *g->regparse++);
++			}
++			regc(g, '\0');
++			if (*g->regparse != ']')
++				FAIL("unmatched []");
++			g->regparse++;
++			*flagp |= HASWIDTH|SIMPLE;
++		}
++		break;
++	case '(':
++		ret = reg(g, 1, &flags);
++		if (ret == NULL)
++			return(NULL);
++		*flagp |= flags&(HASWIDTH|SPSTART);
++		break;
++	case '\0':
++	case '|':
++	case ')':
++		FAIL("internal urp");	/* Supposed to be caught earlier. */
++		break;
++	case '?':
++	case '+':
++	case '*':
++		FAIL("?+* follows nothing");
++		break;
++	case '\\':
++		if (*g->regparse == '\0')
++			FAIL("trailing \\");
++		ret = regnode(g, EXACTLY);
++		regc(g, *g->regparse++);
++		regc(g, '\0');
++		*flagp |= HASWIDTH|SIMPLE;
++		break;
++	default: {
++			register int len;
++			register char ender;
++
++			g->regparse--;
++			len = my_strcspn((const char *)g->regparse, (const char *)META);
++			if (len <= 0)
++				FAIL("internal disaster");
++			ender = *(g->regparse+len);
++			if (len > 1 && ISMULT(ender))
++				len--;		/* Back off clear of ?+* operand. */
++			*flagp |= HASWIDTH;
++			if (len == 1)
++				*flagp |= SIMPLE;
++			ret = regnode(g, EXACTLY);
++			while (len > 0) {
++				regc(g, *g->regparse++);
++				len--;
++			}
++			regc(g, '\0');
++		}
++		break;
++	}
++
++	return(ret);
++}
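A short illustration of the back-off rule in the default case above: the run of ordinary characters stops one short when the next character is a repetition operator, so that '*', '+' or '?' binds to a single character. Everything here is local to the example, and libc strcspn() stands in for the module's my_strcspn().

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *meta = "^$.[()|?+*\\";
	const char *p = "abc*d";
	size_t len = strcspn(p, meta);		/* 3: the run "abc"        */

	if (len > 1 && (p[len] == '*' || p[len] == '+' || p[len] == '?'))
		len--;				/* EXACTLY gets "ab",      */
						/* '*' applies only to 'c' */
	printf("literal=%.*s\n", (int)len, p);
	return 0;
}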
++
++/*
++ - regnode - emit a node
++ */
++static char *			/* Location. */
++regnode(struct match_globals *g, char op)
++{
++	register char *ret;
++	register char *ptr;
++
++	ret = g->regcode;
++	if (ret == &g->regdummy) {
++		g->regsize += 3;
++		return(ret);
++	}
++
++	ptr = ret;
++	*ptr++ = op;
++	*ptr++ = '\0';		/* Null "next" pointer. */
++	*ptr++ = '\0';
++	g->regcode = ptr;
++
++	return(ret);
++}
++
++/*
++ - regc - emit (if appropriate) a byte of code
++ */
++static void
++regc(struct match_globals *g, char b)
++{
++	if (g->regcode != &g->regdummy)
++		*g->regcode++ = b;
++	else
++		g->regsize++;
++}
++
++/*
++ - reginsert - insert an operator in front of already-emitted operand
++ *
++ * Means relocating the operand.
++ */
++static void
++reginsert(struct match_globals *g, char op, char* opnd)
++{
++	register char *src;
++	register char *dst;
++	register char *place;
++
++	if (g->regcode == &g->regdummy) {
++		g->regsize += 3;
++		return;
++	}
++
++	src = g->regcode;
++	g->regcode += 3;
++	dst = g->regcode;
++	while (src > opnd)
++		*--dst = *--src;
++
++	place = opnd;		/* Op node, where operand used to be. */
++	*place++ = op;
++	*place++ = '\0';
++	*place++ = '\0';
++}
++
++/*
++ - regtail - set the next-pointer at the end of a node chain
++ */
++static void
++regtail(struct match_globals *g, char *p, char *val)
++{
++	register char *scan;
++	register char *temp;
++	register int offset;
++
++	if (p == &g->regdummy)
++		return;
++
++	/* Find last node. */
++	scan = p;
++	for (;;) {
++		temp = regnext(g, scan);
++		if (temp == NULL)
++			break;
++		scan = temp;
++	}
++
++	if (OP(scan) == BACK)
++		offset = scan - val;
++	else
++		offset = val - scan;
++	*(scan+1) = (offset>>8)&0377;
++	*(scan+2) = offset&0377;
++}
++
++/*
++ - regoptail - regtail on operand of first argument; nop if operandless
++ */
++static void
++regoptail(struct match_globals *g, char *p, char *val)
++{
++	/* "Operandless" and "op != BRANCH" are synonymous in practice. */
++	if (p == NULL || p == &g->regdummy || OP(p) != BRANCH)
++		return;
++	regtail(g, OPERAND(p), val);
++}
++
++/*
++ * regexec and friends
++ */
++
++
++/*
++ * Forwards.
++ */
++STATIC int regtry(struct match_globals *g, regexp *prog, char *string);
++STATIC int regmatch(struct match_globals *g, char *prog);
++STATIC int regrepeat(struct match_globals *g, char *p);
++
++#ifdef DEBUG
++int regnarrate = 0;
++void regdump();
++STATIC char *regprop(char *op);
++#endif
++
++/*
++ - regexec - match a regexp against a string
++ */
++int
++regexec(regexp *prog, char *string)
++{
++	register char *s;
++	struct match_globals g;
++
++	/* Be paranoid... */
++	if (prog == NULL || string == NULL) {
++		printk("<3>Regexp: NULL parameter\n");
++		return(0);
++	}
++
++	/* Check validity of program. */
++	if (UCHARAT(prog->program) != MAGIC) {
++		printk("<3>Regexp: corrupted program\n");
++		return(0);
++	}
++
++	/* If there is a "must appear" string, look for it. */
++	if (prog->regmust != NULL) {
++		s = string;
++		while ((s = strchr(s, prog->regmust[0])) != NULL) {
++			if (strncmp(s, prog->regmust, prog->regmlen) == 0)
++				break;	/* Found it. */
++			s++;
++		}
++		if (s == NULL)	/* Not present. */
++			return(0);
++	}
++
++	/* Mark beginning of line for ^ . */
++	g.regbol = string;
++
++	/* Simplest case:  anchored match need be tried only once. */
++	if (prog->reganch)
++		return(regtry(&g, prog, string));
++
++	/* Messy cases:  unanchored match. */
++	s = string;
++	if (prog->regstart != '\0')
++		/* We know what char it must start with. */
++		while ((s = strchr(s, prog->regstart)) != NULL) {
++			if (regtry(&g, prog, s))
++				return(1);
++			s++;
++		}
++	else
++		/* We don't -- general case. */
++		do {
++			if (regtry(&g, prog, s))
++				return(1);
++		} while (*s++ != '\0');
++
++	/* Failure. */
++	return(0);
++}
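regexec() above only pays for per-position matching after the cheap regmust prescreen succeeds. A rough standalone sketch of that prescreen, with must_appear() as an invented name:

#include <stdio.h>
#include <string.h>

static int must_appear(const char *string, const char *must, size_t mlen)
{
	const char *s = string;

	while ((s = strchr(s, must[0])) != NULL) {
		if (strncmp(s, must, mlen) == 0)
			return 1;		/* worth running the full match  */
		s++;
	}
	return 0;				/* regexec() can give up at once */
}

int main(void)
{
	printf("%d\n", must_appear("GET /index.html", "index", 5));	/* 1 */
	printf("%d\n", must_appear("POST /submit", "index", 5));	/* 0 */
	return 0;
}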
++
++/*
++ - regtry - try match at specific point
++ */
++static int			/* 0 failure, 1 success */
++regtry(struct match_globals *g, regexp *prog, char *string)
++{
++	register int i;
++	register char **sp;
++	register char **ep;
++
++	g->reginput = string;
++	g->regstartp = prog->startp;
++	g->regendp = prog->endp;
++
++	sp = prog->startp;
++	ep = prog->endp;
++	for (i = NSUBEXP; i > 0; i--) {
++		*sp++ = NULL;
++		*ep++ = NULL;
++	}
++	if (regmatch(g, prog->program + 1)) {
++		prog->startp[0] = string;
++		prog->endp[0] = g->reginput;
++		return(1);
++	} else
++		return(0);
++}
++
++/*
++ - regmatch - main matching routine
++ *
++ * Conceptually the strategy is simple:  check to see whether the current
++ * node matches, call self recursively to see whether the rest matches,
++ * and then act accordingly.  In practice we make some effort to avoid
++ * recursion, in particular by going through "ordinary" nodes (that don't
++ * need to know whether the rest of the match failed) by a loop instead of
++ * by recursion.
++ */
++static int			/* 0 failure, 1 success */
++regmatch(struct match_globals *g, char *prog)
++{
++	register char *scan = prog; /* Current node. */
++	char *next;		    /* Next node. */
++
++#ifdef DEBUG
++	if (scan != NULL && regnarrate)
++		fprintf(stderr, "%s(\n", regprop(scan));
++#endif
++	while (scan != NULL) {
++#ifdef DEBUG
++		if (regnarrate)
++			fprintf(stderr, "%s...\n", regprop(scan));
++#endif
++		next = regnext(g, scan);
++
++		switch (OP(scan)) {
++		case BOL:
++			if (g->reginput != g->regbol)
++				return(0);
++			break;
++		case EOL:
++			if (*g->reginput != '\0')
++				return(0);
++			break;
++		case ANY:
++			if (*g->reginput == '\0')
++				return(0);
++			g->reginput++;
++			break;
++		case EXACTLY: {
++				register int len;
++				register char *opnd;
++
++				opnd = OPERAND(scan);
++				/* Inline the first character, for speed. */
++				if (*opnd != *g->reginput)
++					return(0);
++				len = strlen(opnd);
++				if (len > 1 && strncmp(opnd, g->reginput, len) != 0)
++					return(0);
++				g->reginput += len;
++			}
++			break;
++		case ANYOF:
++			if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) == NULL)
++				return(0);
++			g->reginput++;
++			break;
++		case ANYBUT:
++			if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) != NULL)
++				return(0);
++			g->reginput++;
++			break;
++		case NOTHING:
++		case BACK:
++			break;
++		case OPEN+1:
++		case OPEN+2:
++		case OPEN+3:
++		case OPEN+4:
++		case OPEN+5:
++		case OPEN+6:
++		case OPEN+7:
++		case OPEN+8:
++		case OPEN+9: {
++				register int no;
++				register char *save;
++
++				no = OP(scan) - OPEN;
++				save = g->reginput;
++
++				if (regmatch(g, next)) {
++					/*
++					 * Don't set startp if some later
++					 * invocation of the same parentheses
++					 * already has.
++					 */
++					if (g->regstartp[no] == NULL)
++						g->regstartp[no] = save;
++					return(1);
++				} else
++					return(0);
++			}
++			break;
++		case CLOSE+1:
++		case CLOSE+2:
++		case CLOSE+3:
++		case CLOSE+4:
++		case CLOSE+5:
++		case CLOSE+6:
++		case CLOSE+7:
++		case CLOSE+8:
++		case CLOSE+9:
++			{
++				register int no;
++				register char *save;
++
++				no = OP(scan) - CLOSE;
++				save = g->reginput;
++
++				if (regmatch(g, next)) {
++					/*
++					 * Don't set endp if some later
++					 * invocation of the same parentheses
++					 * already has.
++					 */
++					if (g->regendp[no] == NULL)
++						g->regendp[no] = save;
++					return(1);
++				} else
++					return(0);
++			}
++			break;
++		case BRANCH: {
++				register char *save;
++
++				if (OP(next) != BRANCH)		/* No choice. */
++					next = OPERAND(scan);	/* Avoid recursion. */
++				else {
++					do {
++						save = g->reginput;
++						if (regmatch(g, OPERAND(scan)))
++							return(1);
++						g->reginput = save;
++						scan = regnext(g, scan);
++					} while (scan != NULL && OP(scan) == BRANCH);
++					return(0);
++					/* NOTREACHED */
++				}
++			}
++			break;
++		case STAR:
++		case PLUS: {
++				register char nextch;
++				register int no;
++				register char *save;
++				register int min;
++
++				/*
++				 * Lookahead to avoid useless match attempts
++				 * when we know what character comes next.
++				 */
++				nextch = '\0';
++				if (OP(next) == EXACTLY)
++					nextch = *OPERAND(next);
++				min = (OP(scan) == STAR) ? 0 : 1;
++				save = g->reginput;
++				no = regrepeat(g, OPERAND(scan));
++				while (no >= min) {
++					/* If it could work, try it. */
++					if (nextch == '\0' || *g->reginput == nextch)
++						if (regmatch(g, next))
++							return(1);
++					/* Couldn't or didn't -- back up. */
++					no--;
++					g->reginput = save + no;
++				}
++				return(0);
++			}
++			break;
++		case END:
++			return(1);	/* Success! */
++			break;
++		default:
++			printk("<3>Regexp: memory corruption\n");
++			return(0);
++			break;
++		}
++
++		scan = next;
++	}
++
++	/*
++	 * We get here only if there's trouble -- normally "case END" is
++	 * the terminating point.
++	 */
++	printk("<3>Regexp: corrupted pointers\n");
++	return(0);
++}
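For the STAR/PLUS case above the strategy is: consume as many repetitions as possible, then back off one position at a time until the rest of the pattern fits. A minimal sketch of that idea with an invented helper (PLUS would merely stop backing off at one repetition):

#include <stdio.h>

static int match_star_then_literal(const char *s, char star_ch, char next_ch)
{
	size_t count = 0;

	while (s[count] == star_ch)	/* greedy repeat, like regrepeat() */
		count++;
	for (;;) {			/* back off until the rest fits    */
		if (s[count] == next_ch)
			return 1;
		if (count == 0)
			return 0;
		count--;
	}
}

int main(void)
{
	printf("%d\n", match_star_then_literal("aaab", 'a', 'b'));	/* 1 */
	printf("%d\n", match_star_then_literal("aaac", 'a', 'b'));	/* 0 */
	return 0;
}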
++
++/*
++ - regrepeat - repeatedly match something simple, report how many
++ */
++static int
++regrepeat(struct match_globals *g, char *p)
++{
++	register int count = 0;
++	register char *scan;
++	register char *opnd;
++
++	scan = g->reginput;
++	opnd = OPERAND(p);
++	switch (OP(p)) {
++	case ANY:
++		count = strlen(scan);
++		scan += count;
++		break;
++	case EXACTLY:
++		while (*opnd == *scan) {
++			count++;
++			scan++;
++		}
++		break;
++	case ANYOF:
++		while (*scan != '\0' && strchr(opnd, *scan) != NULL) {
++			count++;
++			scan++;
++		}
++		break;
++	case ANYBUT:
++		while (*scan != '\0' && strchr(opnd, *scan) == NULL) {
++			count++;
++			scan++;
++		}
++		break;
++	default:		/* Oh dear.  Called inappropriately. */
++		printk("<3>Regexp: internal foulup\n");
++		count = 0;	/* Best compromise. */
++		break;
++	}
++	g->reginput = scan;
++
++	return(count);
++}
++
++/*
++ - regnext - dig the "next" pointer out of a node
++ */
++static char*
++regnext(struct match_globals *g, char *p)
++{
++	register int offset;
++
++	if (p == &g->regdummy)
++		return(NULL);
++
++	offset = NEXT(p);
++	if (offset == 0)
++		return(NULL);
++
++	if (OP(p) == BACK)
++		return(p-offset);
++	else
++		return(p+offset);
++}
++
++#ifdef DEBUG
++
++STATIC char *regprop();
++
++/*
++ - regdump - dump a regexp onto stdout in vaguely comprehensible form
++ */
++void
++regdump(regexp *r)
++{
++	register char *s;
++	register char op = EXACTLY;	/* Arbitrary non-END op. */
++	register char *next;
++	struct match_globals g;		/* only needed for regnext()'s &regdummy check */
++	/* extern char *strchr(); */
++
++
++	s = r->program + 1;
++	while (op != END) {	/* While that wasn't END last time... */
++		op = OP(s);
++		printf("%2d%s", s-r->program, regprop(s));	/* Where, what. */
++		next = regnext(&g, s);
++		if (next == NULL)		/* Next ptr. */
++			printf("(0)");
++		else
++			printf("(%d)", (s-r->program)+(next-s));
++		s += 3;
++		if (op == ANYOF || op == ANYBUT || op == EXACTLY) {
++			/* Literal string, where present. */
++			while (*s != '\0') {
++				putchar(*s);
++				s++;
++			}
++			s++;
++		}
++		putchar('\n');
++	}
++
++	/* Header fields of interest. */
++	if (r->regstart != '\0')
++		printf("start `%c' ", r->regstart);
++	if (r->reganch)
++		printf("anchored ");
++	if (r->regmust != NULL)
++		printf("must have \"%s\"", r->regmust);
++	printf("\n");
++}
++
++/*
++ - regprop - printable representation of opcode
++ */
++static char *
++regprop(char *op)
++{
++#define BUFLEN 50
++	register char *p;
++	static char buf[BUFLEN];
++
++	strcpy(buf, ":");
++
++	switch (OP(op)) {
++	case BOL:
++		p = "BOL";
++		break;
++	case EOL:
++		p = "EOL";
++		break;
++	case ANY:
++		p = "ANY";
++		break;
++	case ANYOF:
++		p = "ANYOF";
++		break;
++	case ANYBUT:
++		p = "ANYBUT";
++		break;
++	case BRANCH:
++		p = "BRANCH";
++		break;
++	case EXACTLY:
++		p = "EXACTLY";
++		break;
++	case NOTHING:
++		p = "NOTHING";
++		break;
++	case BACK:
++		p = "BACK";
++		break;
++	case END:
++		p = "END";
++		break;
++	case OPEN+1:
++	case OPEN+2:
++	case OPEN+3:
++	case OPEN+4:
++	case OPEN+5:
++	case OPEN+6:
++	case OPEN+7:
++	case OPEN+8:
++	case OPEN+9:
++		snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "OPEN%d", OP(op)-OPEN);
++		p = NULL;
++		break;
++	case CLOSE+1:
++	case CLOSE+2:
++	case CLOSE+3:
++	case CLOSE+4:
++	case CLOSE+5:
++	case CLOSE+6:
++	case CLOSE+7:
++	case CLOSE+8:
++	case CLOSE+9:
++		snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "CLOSE%d", OP(op)-CLOSE);
++		p = NULL;
++		break;
++	case STAR:
++		p = "STAR";
++		break;
++	case PLUS:
++		p = "PLUS";
++		break;
++	default:
++		printk("<3>Regexp: corrupted opcode\n");
++		break;
++	}
++	if (p != NULL)
++		strncat(buf, p, BUFLEN-strlen(buf));
++	return(buf);
++}
++#endif
++
++
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regexp.h linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regexp.h
+--- linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regexp.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regexp.h	2007-05-26 20:13:52.652129512 +0200
+@@ -0,0 +1,41 @@
++/*
++ * Definitions etc. for regexp(3) routines.
++ *
++ * Caveat:  this is V8 regexp(3) [actually, a reimplementation thereof],
++ * not the System V one.
++ */
++
++#ifndef REGEXP_H
++#define REGEXP_H
++
++
++/*
++http://www.opensource.apple.com/darwinsource/10.3/expect-1/expect/expect.h ,
++which contains a version of this library, says:
++
++ *
++ * NSUBEXP must be at least 10, and no greater than 117 or the parser
++ * will not work properly.
++ *
++
++However, it looks rather like this library is limited to 10.  If you think
++otherwise, let us know.
++*/
++
++#define NSUBEXP  10
++typedef struct regexp {
++	char *startp[NSUBEXP];
++	char *endp[NSUBEXP];
++	char regstart;		/* Internal use only. */
++	char reganch;		/* Internal use only. */
++	char *regmust;		/* Internal use only. */
++	int regmlen;		/* Internal use only. */
++	char program[1];	/* Unwarranted chumminess with compiler. */
++} regexp;
++
++regexp * regcomp(char *exp, int *patternsize);
++int regexec(regexp *prog, char *string);
++void regsub(regexp *prog, char *source, char *dest);
++void regerror(char *s);
++
++#endif
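A hedged sketch of how a caller might drive this API; the pattern, the function name and the lifetime handling are illustrative only (the layer7 match caches compiled programs and lowercases its input before calling regexec()).

#include "regexp.h"

static int matches_http(char *payload)
{
	int patternsize;		/* size of the allocation, reported back */
	regexp *re;

	re = regcomp("http/(0\\.9|1\\.0|1\\.1)", &patternsize);
	if (re == NULL)
		return 0;		/* pattern failed to compile */
	return regexec(re, payload);	/* 1 on match, 0 otherwise; the compiled
					 * program is kept/freed by the caller */
}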
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regmagic.h linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regmagic.h
+--- linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regmagic.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regmagic.h	2007-05-26 20:13:52.652129512 +0200
+@@ -0,0 +1,5 @@
++/*
++ * The first byte of the regexp internal "program" is actually this magic
++ * number; the start node begins in the second byte.
++ */
++#define	MAGIC	0234
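MAGIC is octal 0234 (0x9c, 156 decimal); regexec() and regsub() compare the first byte of a compiled program against it as a cheap corruption check. A trivial sketch of that check, with program_looks_valid() invented for the example:

#include <stdio.h>

#define MAGIC	0234	/* 0x9c */

static int program_looks_valid(const unsigned char *program)
{
	return program[0] == MAGIC;	/* same test regexec()/regsub() apply */
}

int main(void)
{
	unsigned char good[] = { MAGIC, 0 };
	unsigned char bad[]  = { 0x00, 0 };

	printf("%d %d\n", program_looks_valid(good), program_looks_valid(bad));
	return 0;
}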
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regsub.c linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regsub.c
+--- linux-2.6.21.1.old/net/ipv4/netfilter/regexp/regsub.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/regexp/regsub.c	2007-05-26 20:13:52.653129360 +0200
+@@ -0,0 +1,95 @@
++/*
++ * regsub
++ * @(#)regsub.c	1.3 of 2 April 86
++ *
++ *	Copyright (c) 1986 by University of Toronto.
++ *	Written by Henry Spencer.  Not derived from licensed software.
++ *
++ *	Permission is granted to anyone to use this software for any
++ *	purpose on any computer system, and to redistribute it freely,
++ *	subject to the following restrictions:
++ *
++ *	1. The author is not responsible for the consequences of use of
++ *		this software, no matter how awful, even if they arise
++ *		from defects in it.
++ *
++ *	2. The origin of this software must not be misrepresented, either
++ *		by explicit claim or by omission.
++ *
++ *	3. Altered versions must be plainly marked as such, and must not
++ *		be misrepresented as being the original software.
++ *
++ *
++ * This code was modified by Ethan Sommer to work within the kernel
++ * (it now uses kmalloc etc..)
++ *
++ */
++#include "regexp.h"
++#include "regmagic.h"
++#include <linux/string.h>
++
++
++#ifndef CHARBITS
++#define	UCHARAT(p)	((int)*(unsigned char *)(p))
++#else
++#define	UCHARAT(p)	((int)*(p)&CHARBITS)
++#endif
++
++#if 0
++//void regerror(char * s)
++//{
++//        printk("regexp(3): %s", s);
++//        /* NOTREACHED */
++//}
++#endif
++
++/*
++ - regsub - perform substitutions after a regexp match
++ */
++void
++regsub(regexp * prog, char * source, char * dest)
++{
++	register char *src;
++	register char *dst;
++	register char c;
++	register int no;
++	register int len;
++	
++	/* Not necessary and gcc doesn't like it -MLS */
++	/*extern char *strncpy();*/
++
++	if (prog == NULL || source == NULL || dest == NULL) {
++		regerror("NULL parm to regsub");
++		return;
++	}
++	if (UCHARAT(prog->program) != MAGIC) {
++		regerror("damaged regexp fed to regsub");
++		return;
++	}
++
++	src = source;
++	dst = dest;
++	while ((c = *src++) != '\0') {
++		if (c == '&')
++			no = 0;
++		else if (c == '\\' && '0' <= *src && *src <= '9')
++			no = *src++ - '0';
++		else
++			no = -1;
++
++		if (no < 0) {	/* Ordinary character. */
++			if (c == '\\' && (*src == '\\' || *src == '&'))
++				c = *src++;
++			*dst++ = c;
++		} else if (prog->startp[no] != NULL && prog->endp[no] != NULL) {
++			len = prog->endp[no] - prog->startp[no];
++			(void) strncpy(dst, prog->startp[no], len);
++			dst += len;
++			if (len != 0 && *(dst-1) == '\0') {	/* strncpy hit NUL. */
++				regerror("damaged match string");
++				return;
++			}
++		}
++	}
++	*dst++ = '\0';
++}
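A hedged usage sketch tying the three entry points together: compile, match, then substitute a captured group into a template. The pattern, input strings and buffer size are invented, and the caller is assumed to manage the compiled program's memory itself.

#include "regexp.h"

void regsub_example(void)
{
	int size;
	regexp *re = regcomp("user=([a-z]+)", &size);
	char out[64];			/* regsub() does no bounds checking */

	if (re != NULL && regexec(re, "user=alice id=7"))
		regsub(re, "hello \\1", out);	/* out becomes "hello alice" */
}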
diff --git a/target/linux/generic-2.6/patches-2.6.22/101-netfilter_layer7_pktmatch.patch b/target/linux/generic-2.6/patches-2.6.22/101-netfilter_layer7_pktmatch.patch
new file mode 100644
index 0000000000..2d7f058cab
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/101-netfilter_layer7_pktmatch.patch
@@ -0,0 +1,108 @@
+diff -urN linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_layer7.h linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_layer7.h
+--- linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_layer7.h	2007-05-26 20:17:47.624408296 +0200
++++ linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_layer7.h	2007-05-26 20:17:48.729240336 +0200
+@@ -21,6 +21,7 @@
+     char protocol[MAX_PROTOCOL_LEN];
+     char invert:1;
+     char pattern[MAX_PATTERN_LEN];
++    char pkt;
+ };
+ 
+ #endif /* _IPT_LAYER7_H */
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/ipt_layer7.c linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_layer7.c
+--- linux-2.6.21.1.old/net/ipv4/netfilter/ipt_layer7.c	2007-05-26 20:17:47.626407992 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_layer7.c	2007-05-26 20:17:48.729240336 +0200
+@@ -296,33 +296,34 @@
+ 	}
+ }
+ 
+-/* add the new app data to the conntrack.  Return number of bytes added. */
+-static int add_data(struct ip_conntrack * master_conntrack,
+-			char * app_data, int appdatalen)
++static int add_datastr(char *target, int offset, char *app_data, int len)
+ {
+ 	int length = 0, i;
+-	int oldlength = master_conntrack->layer7.app_data_len;
+-
+-	// This is a fix for a race condition by Deti Fliegl. However, I'm not 
+-	// clear on whether the race condition exists or whether this really 
+-	// fixes it.  I might just be being dense... Anyway, if it's not really 
+-	// a fix, all it does is waste a very small amount of time.
+-	if(!master_conntrack->layer7.app_data) return 0;
++	if(!target) return 0;
+ 
+ 	/* Strip nulls. Make everything lower case (our regex lib doesn't
+ 	do case insensitivity).  Add it to the end of the current data. */
+-	for(i = 0; i < maxdatalen-oldlength-1 &&
+-		   i < appdatalen; i++) {
++	for(i = 0; i < maxdatalen-offset-1 && i < len; i++) {
+ 		if(app_data[i] != '\0') {
+-			master_conntrack->layer7.app_data[length+oldlength] =
++			target[length+offset] =
+ 				/* the kernel version of tolower mungs 'upper ascii' */
+ 				isascii(app_data[i])? tolower(app_data[i]) : app_data[i];
+ 			length++;
+ 		}
+ 	}
++	target[length+offset] = '\0';
+ 
+-	master_conntrack->layer7.app_data[length+oldlength] = '\0';
+-	master_conntrack->layer7.app_data_len = length + oldlength;
++	return length;
++}
++
++/* add the new app data to the conntrack.  Return number of bytes added. */
++static int add_data(struct ip_conntrack * master_conntrack,
++			char * app_data, int appdatalen)
++{
++	int length;
++
++	length = add_datastr(master_conntrack->layer7.app_data, master_conntrack->layer7.app_data_len, app_data, appdatalen);
++	master_conntrack->layer7.app_data_len += length;
+ 
+ 	return length;
+ }
+@@ -339,7 +340,7 @@
+ 	struct ipt_layer7_info * info = (struct ipt_layer7_info *)matchinfo;
+ 	enum ip_conntrack_info master_ctinfo, ctinfo;
+ 	struct ip_conntrack *master_conntrack, *conntrack;
+-	unsigned char * app_data;
++	unsigned char *app_data, *tmp_data;
+ 	unsigned int pattern_result, appdatalen;
+ 	regexp * comppattern;
+ 
+@@ -362,8 +363,8 @@
+ 		master_conntrack = master_ct(master_conntrack);
+ 
+ 	/* if we've classified it or seen too many packets */
+-	if(TOTAL_PACKETS > num_packets ||
+-	   master_conntrack->layer7.app_proto) {
++	if(!info->pkt && (TOTAL_PACKETS > num_packets ||
++		master_conntrack->layer7.app_proto)) {
+ 
+ 		pattern_result = match_no_append(conntrack, master_conntrack, ctinfo, master_ctinfo, info);
+ 
+@@ -394,6 +395,23 @@
+ 	comppattern = compile_and_cache(info->pattern, info->protocol);
+ 	spin_unlock_bh(&list_lock);
+ 
++	if (info->pkt) {
++		tmp_data = kmalloc(maxdatalen, GFP_ATOMIC);
++		if(!tmp_data){
++			if (net_ratelimit())
++				printk(KERN_ERR "layer7: out of memory in match, bailing.\n");
++			return info->invert;
++		}
++
++		tmp_data[0] = '\0';
++		add_datastr(tmp_data, 0, app_data, appdatalen);
++		pattern_result = ((comppattern && regexec(comppattern, tmp_data)) ? 1 : 0);
++		kfree(tmp_data);
++		tmp_data = NULL;
++
++		return (pattern_result ^ info->invert);
++	}
++
+ 	/* On the first packet of a connection, allocate space for app data */
+ 	write_lock(&ct_lock);
+ 	if(TOTAL_PACKETS == 1 && !skb->cb[0] && !master_conntrack->layer7.app_data) {
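The hunk above splits the old add_data() into add_datastr() plus a thin wrapper, so that when the new pkt flag is set the regexp can be run against a scratch copy of just the current packet instead of the accumulated per-connection buffer. The host-side sketch below mimics what add_datastr() does to the bytes before matching (drop NULs, lowercase ASCII, append at an offset, NUL-terminate); MAXDATALEN and the sample payload are invented, and a plain ASCII range test stands in for the kernel's isascii()/tolower().

#include <stdio.h>

#define MAXDATALEN 64	/* stands in for the module's maxdatalen parameter */

static int add_datastr(char *target, int offset, const char *data, int len)
{
	int length = 0, i;

	for (i = 0; i < MAXDATALEN - offset - 1 && i < len; i++) {
		if (data[i] != '\0') {
			unsigned char c = (unsigned char)data[i];

			/* lowercase plain ASCII, pass other bytes through */
			target[length + offset] =
				(c >= 'A' && c <= 'Z') ? c + ('a' - 'A') : c;
			length++;
		}
	}
	target[length + offset] = '\0';
	return length;
}

int main(void)
{
	char buf[MAXDATALEN] = "";
	const char payload[] = { 'G', 'E', 'T', ' ', '/', 'I', 'n', 'd',
				 'e', 'x', '\0', '.', 'H', 'T', 'M', 'L' };

	add_datastr(buf, 0, payload, (int)sizeof(payload));
	printf("%s\n", buf);	/* prints: get /index.html */
	return 0;
}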
diff --git a/target/linux/generic-2.6/patches-2.6.22/110-ipp2p_0.8.1rc1.patch b/target/linux/generic-2.6/patches-2.6.22/110-ipp2p_0.8.1rc1.patch
new file mode 100644
index 0000000000..9ff9e23606
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/110-ipp2p_0.8.1rc1.patch
@@ -0,0 +1,948 @@
+diff -urN linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_ipp2p.h linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_ipp2p.h
+--- linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_ipp2p.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_ipp2p.h	2007-05-26 20:21:54.586864296 +0200
+@@ -0,0 +1,31 @@
++#ifndef __IPT_IPP2P_H
++#define __IPT_IPP2P_H
++#define IPP2P_VERSION "0.8.1_rc1"
++
++struct ipt_p2p_info {
++    int cmd;
++    int debug;
++};
++
++#endif //__IPT_IPP2P_H
++
++#define SHORT_HAND_IPP2P	1 /* --ipp2p switch*/
++//#define SHORT_HAND_DATA		4 /* --ipp2p-data switch*/
++#define SHORT_HAND_NONE		5 /* no short hand*/
++
++#define IPP2P_EDK		(1 << 1)
++#define IPP2P_DATA_KAZAA	(1 << 2)
++#define IPP2P_DATA_EDK		(1 << 3)
++#define IPP2P_DATA_DC		(1 << 4)
++#define IPP2P_DC		(1 << 5)
++#define IPP2P_DATA_GNU		(1 << 6)
++#define IPP2P_GNU		(1 << 7)
++#define IPP2P_KAZAA		(1 << 8)
++#define IPP2P_BIT		(1 << 9)
++#define IPP2P_APPLE		(1 << 10)
++#define IPP2P_SOUL		(1 << 11)
++#define IPP2P_WINMX		(1 << 12)
++#define IPP2P_ARES		(1 << 13)
++#define IPP2P_MUTE		(1 << 14)
++#define IPP2P_WASTE		(1 << 15)
++#define IPP2P_XDCC		(1 << 16)
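Each protocol gets its own bit, so the cmd field of struct ipt_p2p_info is an ordinary bitmask that the match routine later tests entry by entry. A minimal sketch of composing and testing it; the three flag values are copied from the header above, the rest is illustrative.

#include <stdio.h>

#define IPP2P_EDK	(1 << 1)
#define IPP2P_KAZAA	(1 << 8)
#define IPP2P_BIT	(1 << 9)

struct ipt_p2p_info {
	int cmd;
	int debug;
};

int main(void)
{
	struct ipt_p2p_info info = { .cmd = IPP2P_EDK | IPP2P_BIT, .debug = 0 };

	printf("edk=%d kazaa=%d bit=%d\n",
	       (info.cmd & IPP2P_EDK) != 0,
	       (info.cmd & IPP2P_KAZAA) != 0,
	       (info.cmd & IPP2P_BIT) != 0);
	return 0;
}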
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/ipt_ipp2p.c linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_ipp2p.c
+--- linux-2.6.21.1.old/net/ipv4/netfilter/ipt_ipp2p.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_ipp2p.c	2007-05-26 20:21:54.587864144 +0200
+@@ -0,0 +1,881 @@
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#include <linux/module.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/version.h>
++#include <linux/netfilter_ipv4/ipt_ipp2p.h>
++#include <net/tcp.h>
++#include <net/udp.h>
++
++#define get_u8(X,O)  (*(__u8 *)(X + O))
++#define get_u16(X,O)  (*(__u16 *)(X + O))
++#define get_u32(X,O)  (*(__u32 *)(X + O))
++
++MODULE_AUTHOR("Eicke Friedrich/Klaus Degner <ipp2p@ipp2p.org>");
++MODULE_DESCRIPTION("An extension to iptables to identify P2P traffic.");
++MODULE_LICENSE("GPL");
++
++
++/*Search for UDP eDonkey/eMule/Kad commands*/
++int
++udp_search_edk (unsigned char *haystack, int packet_len)
++{
++    unsigned char *t = haystack;
++    t += 8;
++
++	switch (t[0]) {
++		case 0xe3:
++		{	/*edonkey*/
++			switch (t[1])
++			{
++				/* client -> server status request */
++				case 0x96:
++					if (packet_len == 14) return ((IPP2P_EDK * 100) + 50);
++					break;
++				/* server -> client status request */
++				case 0x97: if (packet_len == 42) return ((IPP2P_EDK * 100) + 51);
++					break;
++						/* server description request */
++						/* e3 2a ff f0 .. | size == 6 */
++				case 0xa2: if ( (packet_len == 14) && ( get_u16(t,2) == __constant_htons(0xfff0) ) ) return ((IPP2P_EDK * 100) + 52);
++					break;
++						/* server description response */
++						/* e3 a3 ff f0 ..  | size > 40 && size < 200 */
++				//case 0xa3: return ((IPP2P_EDK * 100) + 53);
++				//	break;
++				case 0x9a: if (packet_len==26) return ((IPP2P_EDK * 100) + 54);
++					break;
++
++				case 0x92: if (packet_len==18) return ((IPP2P_EDK * 100) + 55);
++					break;
++			}
++			break;
++		}
++		case 0xe4:
++		{
++			switch (t[1])
++			{
++						/* e4 20 .. | size == 43 */
++				case 0x20: if ((packet_len == 43) && (t[2] != 0x00) && (t[34] != 0x00)) return ((IPP2P_EDK * 100) + 60);
++					break;
++						/* e4 00 .. 00 | size == 35 ? */
++				case 0x00: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 61);
++					break;
++						/* e4 10 .. 00 | size == 35 ? */
++				case 0x10: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 62);
++					break;
++						/* e4 18 .. 00 | size == 35 ? */
++				case 0x18: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 63);
++					break;
++						/* e4 52 .. | size = 44 */
++				case 0x52: if (packet_len == 44 ) return ((IPP2P_EDK * 100) + 64);
++					break;
++						/* e4 58 .. | size == 6 */
++				case 0x58: if (packet_len == 14 ) return ((IPP2P_EDK * 100) + 65);
++					break;
++						/* e4 59 .. | size == 2 */
++				case 0x59: if (packet_len == 10 )return ((IPP2P_EDK * 100) + 66);
++					break;
++					/* e4 28 .. | packet_len == 52,77,102,127... */
++				case 0x28: if (((packet_len-52) % 25) == 0) return ((IPP2P_EDK * 100) + 67);
++					break;
++					/* e4 50 xx xx | size == 4 */
++				case 0x50: if (packet_len == 12) return ((IPP2P_EDK * 100) + 68);
++					break;
++					/* e4 40 xx xx | size == 48 */
++				case 0x40: if (packet_len == 56) return ((IPP2P_EDK * 100) + 69);
++					break;
++			}
++			break;
++		}
++	} /* end of switch (t[0]) */
++    return 0;
++}/*udp_search_edk*/
++
++
++/*Search for UDP Gnutella commands*/
++int
++udp_search_gnu (unsigned char *haystack, int packet_len)
++{
++    unsigned char *t = haystack;
++    t += 8;
++
++    if (memcmp(t, "GND", 3) == 0) return ((IPP2P_GNU * 100) + 51);
++    if (memcmp(t, "GNUTELLA ", 9) == 0) return ((IPP2P_GNU * 100) + 52);
++    return 0;
++}/*udp_search_gnu*/
++
++
++/*Search for UDP KaZaA commands*/
++int
++udp_search_kazaa (unsigned char *haystack, int packet_len)
++{
++    unsigned char *t = haystack;
++
++    if (t[packet_len-1] == 0x00){
++	t += (packet_len - 6);
++	if (memcmp(t, "KaZaA", 5) == 0) return (IPP2P_KAZAA * 100 +50);
++    }
++
++    return 0;
++}/*udp_search_kazaa*/
++
++/*Search for UDP DirectConnect commands*/
++int
++udp_search_directconnect (unsigned char *haystack, int packet_len)
++{
++    unsigned char *t = haystack;
++    if ((*(t + 8) == 0x24) && (*(t + packet_len - 1) == 0x7c)) {
++    	t+=8;
++    	if (memcmp(t, "SR ", 3) == 0)	 		return ((IPP2P_DC * 100) + 60);
++    	if (memcmp(t, "Ping ", 5) == 0)	 		return ((IPP2P_DC * 100) + 61);
++    }
++    return 0;
++}/*udp_search_directconnect*/
++
++
++
++/*Search for UDP BitTorrent commands*/
++int
++udp_search_bit (unsigned char *haystack, int packet_len)
++{
++	switch(packet_len)
++	{
++		case 24:
++			/* ^ 00 00 04 17 27 10 19 80 */
++			if ((ntohl(get_u32(haystack, 8)) == 0x00000417) && (ntohl(get_u32(haystack, 12)) == 0x27101980))
++				return (IPP2P_BIT * 100 + 50);
++			break;
++		case 44:
++			if (get_u32(haystack, 16) == __constant_htonl(0x00000400) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
++				return (IPP2P_BIT * 100 + 51);
++			if (get_u32(haystack, 16) == __constant_htonl(0x00000400))
++				return (IPP2P_BIT * 100 + 61);
++			break;
++		case 65:
++			if (get_u32(haystack, 16) == __constant_htonl(0x00000404) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
++				return (IPP2P_BIT * 100 + 52);
++			if (get_u32(haystack, 16) == __constant_htonl(0x00000404))
++				return (IPP2P_BIT * 100 + 62);
++			break;
++		case 67:
++			if (get_u32(haystack, 16) == __constant_htonl(0x00000406) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
++				return (IPP2P_BIT * 100 + 53);
++			if (get_u32(haystack, 16) == __constant_htonl(0x00000406))
++				return (IPP2P_BIT * 100 + 63);
++			break;
++		case 211:
++			if (get_u32(haystack, 8) == __constant_htonl(0x00000405))
++				return (IPP2P_BIT * 100 + 54);
++			break;
++		case 29:
++			if ((get_u32(haystack, 8) == __constant_htonl(0x00000401)))
++				return (IPP2P_BIT * 100 + 55);
++			break;
++		case 52:
++			if (get_u32(haystack,8)  == __constant_htonl(0x00000827) &&
++			get_u32(haystack,12) == __constant_htonl(0x37502950))
++				return (IPP2P_BIT * 100 + 80);
++			break;
++		default:
++			/* this packet does not have a constant size */
++			if (packet_len >= 40 && get_u32(haystack, 16) == __constant_htonl(0x00000402) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
++				return (IPP2P_BIT * 100 + 56);
++			break;
++	}
++
++	/* some extra-bitcomet rules:
++	* "d1:" [a|r] "d2:id20:"
++	*/
++	if (packet_len > 30 && get_u8(haystack, 8) == 'd' && get_u8(haystack, 9) == '1' && get_u8(haystack, 10) == ':' )
++	{
++		if (get_u8(haystack, 11) == 'a' || get_u8(haystack, 11) == 'r')
++		{
++			if (memcmp(haystack+12,"d2:id20:",8)==0)
++				return (IPP2P_BIT * 100 + 57);
++		}
++	}
++
++#if 0
++	/* bitlord rules */
++	/* packetlen must be bigger than 40 */
++	/* first 4 bytes are zero */
++	if (packet_len > 40 && get_u32(haystack, 8) == 0x00000000)
++	{
++		/* first rule: 00 00 00 00 01 00 00 xx xx xx xx 00 00 00 00*/
++		if (get_u32(haystack, 12) == 0x00000000 &&
++		    get_u32(haystack, 16) == 0x00010000 &&
++		    get_u32(haystack, 24) == 0x00000000 )
++			return (IPP2P_BIT * 100 + 71);
++
++		/* 00 01 00 00 0d 00 00 xx xx xx xx 00 00 00 00*/
++		if (get_u32(haystack, 12) == 0x00000001 &&
++		    get_u32(haystack, 16) == 0x000d0000 &&
++		    get_u32(haystack, 24) == 0x00000000 )
++			return (IPP2P_BIT * 100 + 71);
++
++
++	}
++#endif
++
++    return 0;
++}/*udp_search_bit*/
++
++
++
++/*Search for Ares commands*/
++//#define IPP2P_DEBUG_ARES
++int
++search_ares (const unsigned char *payload, const u16 plen)
++//int search_ares (unsigned char *haystack, int packet_len, int head_len)
++{
++//	const unsigned char *t = haystack + head_len;
++
++	/* all ares packets start with  */
++	if (payload[1] == 0 && (plen - payload[0]) == 3)
++	{
++		switch (payload[2])
++		{
++			case 0x5a:
++				/* ares connect */
++				if ( plen == 6 && payload[5] == 0x05 ) return ((IPP2P_ARES * 100) + 1);
++				break;
++			case 0x09:
++				/* ares search, min 3 chars --> 14 bytes
++				 * let's say a search can be up to 30 chars --> max 34 bytes
++				 */
++				if ( plen >= 14 && plen <= 34 ) return ((IPP2P_ARES * 100) + 1);
++				break;
++#ifdef IPP2P_DEBUG_ARES
++			default:
++			printk(KERN_DEBUG "Unknown Ares command %x recognized, len: %u \n", (unsigned int) payload[2],plen);
++#endif /* IPP2P_DEBUG_ARES */
++		}
++	}
++
++#if 0
++	/* found connect packet: 03 00 5a 04 03 05 */
++	/* new version ares 1.8: 03 00 5a xx xx 05 */
++    if ((plen) == 6){	/* possible connect command*/
++	if ((payload[0] == 0x03) && (payload[1] == 0x00) && (payload[2] == 0x5a) && (payload[5] == 0x05))
++	    return ((IPP2P_ARES * 100) + 1);
++    }
++    if ((plen) == 60){	/* possible download command*/
++	if ((payload[59] == 0x0a) && (payload[58] == 0x0a)){
++	    if (memcmp(t, "PUSH SHA1:", 10) == 0) /* found download command */
++	    	return ((IPP2P_ARES * 100) + 2);
++	}
++    }
++#endif
++
++    return 0;
++} /*search_ares*/
++
++/*Search for SoulSeek commands*/
++int
++search_soul (const unsigned char *payload, const u16 plen)
++{
++//#define IPP2P_DEBUG_SOUL
++    /* match: xx xx xx xx | xx = sizeof(payload) - 4 */
++    if (get_u32(payload, 0) == (plen - 4)){
++	const __u32 m=get_u32(payload, 4);
++	/* match 00 yy yy 00, yy can be everything */
++        if ( get_u8(payload, 4) == 0x00 && get_u8(payload, 7) == 0x00 )
++	{
++#ifdef IPP2P_DEBUG_SOUL
++	printk(KERN_DEBUG "0: Soulseek command 0x%x recognized\n",get_u32(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 1);
++	}
++
++        /* next match: 01 yy 00 00 | yy can be everything */
++        if ( get_u8(payload, 4) == 0x01 && get_u16(payload, 6) == 0x0000 )
++	{
++#ifdef IPP2P_DEBUG_SOUL
++	printk(KERN_DEBUG "1: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 2);
++	}
++
++	/* other soulseek commands are: 1-5,7,9,13-18,22,23,26,28,35-37,40-46,50,51,60,62-69,91,92,1001 */
++	/* try to do this in an intelligent way */
++	/* get all small commands */
++	switch(m)
++	{
++		case 7:
++		case 9:
++		case 22:
++		case 23:
++		case 26:
++		case 28:
++		case 50:
++		case 51:
++		case 60:
++		case 91:
++		case 92:
++		case 1001:
++#ifdef IPP2P_DEBUG_SOUL
++		printk(KERN_DEBUG "2: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 3);
++	}
++
++	if (m > 0 && m < 6 )
++	{
++#ifdef IPP2P_DEBUG_SOUL
++		printk(KERN_DEBUG "3: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 4);
++	}
++	if (m > 12 && m < 19 )
++	{
++#ifdef IPP2P_DEBUG_SOUL
++		printk(KERN_DEBUG "4: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 5);
++	}
++
++	if (m > 34 && m < 38 )
++	{
++#ifdef IPP2P_DEBUG_SOUL
++		printk(KERN_DEBUG "5: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 6);
++	}
++
++	if (m > 39 && m < 47 )
++	{
++#ifdef IPP2P_DEBUG_SOUL
++		printk(KERN_DEBUG "6: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 7);
++	}
++
++	if (m > 61 && m < 70 )
++	{
++#ifdef IPP2P_DEBUG_SOUL
++		printk(KERN_DEBUG "7: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
++#endif /* IPP2P_DEBUG_SOUL */
++		return ((IPP2P_SOUL * 100) + 8);
++	}
++
++#ifdef IPP2P_DEBUG_SOUL
++	printk(KERN_DEBUG "unknown SOULSEEK command: 0x%x, first 16 bit: 0x%x, first 8 bit: 0x%x ,soulseek ???\n",get_u32(payload, 4),get_u16(payload, 4) >> 16,get_u8(payload, 4) >> 24);
++#endif /* IPP2P_DEBUG_SOUL */
++    }
++
++	/* match 14 00 00 00 01 yy 00 00 00 STRING(YY) 01 00 00 00 00 46|50 00 00 00 00 */
++	/* without size at the beginning !!! */
++	if ( get_u32(payload, 0) == 0x14 && get_u8(payload, 4) == 0x01 )
++	{
++		__u32 y=get_u32(payload, 5);
++		/* we need 19 chars + string */
++		if ( (y + 19) <= (plen) )
++		{
++			const unsigned char *w=payload+9+y;
++			if (get_u32(w, 0) == 0x01 && ( get_u16(w, 4) == 0x4600 || get_u16(w, 4) == 0x5000) && get_u32(w, 6) == 0x00)
++			{
++#ifdef IPP2P_DEBUG_SOUL
++	    			printk(KERN_DEBUG "Soulseek special client command recognized\n");
++#endif /* IPP2P_DEBUG_SOUL */
++	    			return ((IPP2P_SOUL * 100) + 9);
++			}
++		}
++	}
++    return 0;
++}
++
++
++/*Search for WinMX commands*/
++int
++search_winmx (const unsigned char *payload, const u16 plen)
++{
++//#define IPP2P_DEBUG_WINMX
++    if (((plen) == 4) && (memcmp(payload, "SEND", 4) == 0))  return ((IPP2P_WINMX * 100) + 1);
++    if (((plen) == 3) && (memcmp(payload, "GET", 3) == 0))  return ((IPP2P_WINMX * 100) + 2);
++    //if (packet_len < (head_len + 10)) return 0;
++    if (plen < 10) return 0;
++
++    if ((memcmp(payload, "SEND", 4) == 0) || (memcmp(payload, "GET", 3) == 0)){
++        u16 c=4;
++        const u16 end=plen-2;
++        u8 count=0;
++        while (c < end)
++        {
++        	if (payload[c]== 0x20 && payload[c+1] == 0x22)
++        	{
++        		c++;
++        		count++;
++        		if (count>=2) return ((IPP2P_WINMX * 100) + 3);
++        	}
++        	c++;
++        }
++    }
++
++    if ( plen == 149 && payload[0] == '8' )
++    {
++#ifdef IPP2P_DEBUG_WINMX
++    	printk(KERN_INFO "maybe WinMX\n");
++#endif
++    	if (get_u32(payload,17) == 0 && get_u32(payload,21) == 0 && get_u32(payload,25) == 0 &&
++//    	    get_u32(payload,33) == __constant_htonl(0x71182b1a) && get_u32(payload,37) == __constant_htonl(0x05050000) &&
++//    	    get_u32(payload,133) == __constant_htonl(0x31097edf) && get_u32(payload,145) == __constant_htonl(0xdcb8f792))
++    	    get_u16(payload,39) == 0 && get_u16(payload,135) == __constant_htons(0x7edf) && get_u16(payload,147) == __constant_htons(0xf792))
++
++    	{
++#ifdef IPP2P_DEBUG_WINMX
++    		printk(KERN_INFO "got WinMX\n");
++#endif
++    		return ((IPP2P_WINMX * 100) + 4);
++    	}
++    }
++    return 0;
++} /*search_winmx*/
++
++
++/*Search for appleJuice commands*/
++int
++search_apple (const unsigned char *payload, const u16 plen)
++{
++    if ( (plen > 7) && (payload[6] == 0x0d) && (payload[7] == 0x0a) && (memcmp(payload, "ajprot", 6) == 0))  return (IPP2P_APPLE * 100);
++
++    return 0;
++}
++
++
++/*Search for BitTorrent commands*/
++int
++search_bittorrent (const unsigned char *payload, const u16 plen)
++{
++    if (plen > 20)
++    {
++	/* test for match 0x13+"BitTorrent protocol" */
++	if (payload[0] == 0x13)
++	{
++		if (memcmp(payload+1, "BitTorrent protocol", 19) == 0) return (IPP2P_BIT * 100);
++	}
++
++	/* get tracker commands: they all start with GET /,
++	* followed by scrape or announce,
++	* and then ?info_hash=
++	*/
++	if (memcmp(payload,"GET /",5) == 0)
++	{
++		/* message scrape */
++		if ( memcmp(payload+5,"scrape?info_hash=",17)==0 ) return (IPP2P_BIT * 100 + 1);
++		/* message announce */
++		if ( memcmp(payload+5,"announce?info_hash=",19)==0 ) return (IPP2P_BIT * 100 + 2);
++	}
++    }
++    else
++    {
++    	/* bitcomet encrypts the first packet, so we have to detect another
++    	 * one later in the flow */
++    	 /* first try failed, too many misdetections */
++    	//if ( size == 5 && get_u32(t,0) == __constant_htonl(1) && t[4] < 3) return (IPP2P_BIT * 100 + 3);
++
++    	/* second try: block request packets */
++    	if ( plen == 17 && get_u32(payload,0) == __constant_htonl(0x0d) && payload[4] == 0x06 && get_u32(payload,13) == __constant_htonl(0x4000) ) return (IPP2P_BIT * 100 + 3);
++    }
++
++    return 0;
++}
++
++
++
++/*check for Kazaa get command*/
++int
++search_kazaa (const unsigned char *payload, const u16 plen)
++
++{
++    if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a) && memcmp(payload, "GET /.hash=", 11) == 0)
++	return (IPP2P_DATA_KAZAA * 100);
++
++    return 0;
++}
++
++
++/*check for gnutella get command*/
++int
++search_gnu (const unsigned char *payload, const u16 plen)
++{
++    if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
++    {
++	if (memcmp(payload, "GET /get/", 9) == 0)	return ((IPP2P_DATA_GNU * 100) + 1);
++	if (memcmp(payload, "GET /uri-res/", 13) == 0) return ((IPP2P_DATA_GNU * 100) + 2);
++    }
++    return 0;
++}
++
++
++/*check for gnutella get commands and other typical data*/
++int
++search_all_gnu (const unsigned char *payload, const u16 plen)
++{
++
++    if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
++    {
++
++	if (memcmp(payload, "GNUTELLA CONNECT/", 17) == 0) return ((IPP2P_GNU * 100) + 1);
++	if (memcmp(payload, "GNUTELLA/", 9) == 0) return ((IPP2P_GNU * 100) + 2);
++
++
++	if ((memcmp(payload, "GET /get/", 9) == 0) || (memcmp(payload, "GET /uri-res/", 13) == 0))
++	{
++		u16 c=8;
++		const u16 end=plen-22;
++		while (c < end) {
++			if ( payload[c] == 0x0a && payload[c+1] == 0x0d && ((memcmp(&payload[c+2], "X-Gnutella-", 11) == 0) || (memcmp(&payload[c+2], "X-Queue:", 8) == 0)))
++				return ((IPP2P_GNU * 100) + 3);
++			c++;
++		}
++	}
++    }
++    return 0;
++}
++
++
++/*check for KaZaA download commands and other typical data*/
++int
++search_all_kazaa (const unsigned char *payload, const u16 plen)
++{
++    if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
++    {
++
++	if (memcmp(payload, "GIVE ", 5) == 0) return ((IPP2P_KAZAA * 100) + 1);
++
++    	if (memcmp(payload, "GET /", 5) == 0) {
++		u16 c = 8;
++		const u16 end=plen-22;
++		while (c < end) {
++			if ( payload[c] == 0x0a && payload[c+1] == 0x0d && ((memcmp(&payload[c+2], "X-Kazaa-Username: ", 18) == 0) || (memcmp(&payload[c+2], "User-Agent: PeerEnabler/", 24) == 0)))
++				return ((IPP2P_KAZAA * 100) + 2);
++			c++;
++		}
++	}
++    }
++    return 0;
++}
++
++/*fast check for edonkey file segment transfer command*/
++int
++search_edk (const unsigned char *payload, const u16 plen)
++{
++    if (payload[0] != 0xe3)
++	return 0;
++    else {
++	if (payload[5] == 0x47)
++	    return (IPP2P_DATA_EDK * 100);
++	else
++	    return 0;
++    }
++}
++
++
++
++/*intensive but slower search for some edonkey packets including size-check*/
++int
++search_all_edk (const unsigned char *payload, const u16 plen)
++{
++    if (payload[0] != 0xe3)
++	return 0;
++    else {
++	//t += head_len;
++	const u16 cmd = get_u16(payload, 1);
++	if (cmd == (plen - 5)) {
++	    switch (payload[5]) {
++		case 0x01: return ((IPP2P_EDK * 100) + 1);	/*Client: hello or Server:hello*/
++		case 0x4c: return ((IPP2P_EDK * 100) + 9);	/*Client: Hello-Answer*/
++	    }
++	}
++	return 0;
++     }
++}
++
++
++/*fast check for Direct Connect send command*/
++int
++search_dc (const unsigned char *payload, const u16 plen)
++{
++
++    if (payload[0] != 0x24 )
++	return 0;
++    else {
++	if (memcmp(&payload[1], "Send|", 5) == 0)
++	    return (IPP2P_DATA_DC * 100);
++	else
++	    return 0;
++    }
++
++}
++
++
++/*intensive but slower check for all direct connect packets*/
++int
++search_all_dc (const unsigned char *payload, const u16 plen)
++{
++//    unsigned char *t = haystack;
++
++    if (payload[0] == 0x24 && payload[plen-1] == 0x7c)
++    {
++    	const unsigned char *t=&payload[1];
++    		/* Client-Hub-Protocol */
++	if (memcmp(t, "Lock ", 5) == 0)	 		return ((IPP2P_DC * 100) + 1);
++	/* Client-Client-Protocol, some are already recognized by client-hub (like lock) */
++	if (memcmp(t, "MyNick ", 7) == 0)	 	return ((IPP2P_DC * 100) + 38);
++    }
++    return 0;
++}
++
++/*check for mute*/
++int
++search_mute (const unsigned char *payload, const u16 plen)
++{
++	if ( plen == 209 || plen == 345 || plen == 473 || plen == 609 || plen == 1121 )
++	{
++		//printk(KERN_DEBUG "size hit: %u",size);
++		if (memcmp(payload,"PublicKey: ",11) == 0 )
++		{
++			return ((IPP2P_MUTE * 100) + 0);
++
++/*			if (memcmp(t+size-14,"\x0aEndPublicKey\x0a",14) == 0)
++			{
++				printk(KERN_DEBUG "end public key hit: %u",size);
++
++			}*/
++		}
++	}
++	return 0;
++}
++
++
++/* check for xdcc */
++int
++search_xdcc (const unsigned char *payload, const u16 plen)
++{
++	/* search in small packets only */
++	if (plen > 20 && plen < 200 && payload[plen-1] == 0x0a && payload[plen-2] == 0x0d && memcmp(payload,"PRIVMSG ",8) == 0)
++	{
++
++		u16 x=10;
++		const u16 end=plen - 13;
++
++		/* it seems to be an IRC private message; check for an xdcc command */
++		while (x < end)
++		{
++			if (payload[x] == ':')
++			{
++				if ( memcmp(&payload[x+1],"xdcc send #",11) == 0 )
++					return ((IPP2P_XDCC * 100) + 0);
++			}
++			x++;
++		}
++	}
++	return 0;
++}
++
++/* search for waste */
++int search_waste(const unsigned char *payload, const u16 plen)
++{
++	if ( plen >= 9 && memcmp(payload,"GET.sha1:",9) == 0)
++		return ((IPP2P_WASTE * 100) + 0);
++
++	return 0;
++}
++
++
++static struct {
++    int command;
++    __u8 short_hand;			/*for functions included in short hands*/
++    int packet_len;
++    int (*function_name) (const unsigned char *, const u16);
++} matchlist[] = {
++    {IPP2P_EDK,SHORT_HAND_IPP2P,20, &search_all_edk},
++//    {IPP2P_DATA_KAZAA,SHORT_HAND_DATA,200, &search_kazaa},
++//    {IPP2P_DATA_EDK,SHORT_HAND_DATA,60, &search_edk},
++//    {IPP2P_DATA_DC,SHORT_HAND_DATA,26, &search_dc},
++    {IPP2P_DC,SHORT_HAND_IPP2P,5, search_all_dc},
++//    {IPP2P_DATA_GNU,SHORT_HAND_DATA,40, &search_gnu},
++    {IPP2P_GNU,SHORT_HAND_IPP2P,5, &search_all_gnu},
++    {IPP2P_KAZAA,SHORT_HAND_IPP2P,5, &search_all_kazaa},
++    {IPP2P_BIT,SHORT_HAND_IPP2P,20, &search_bittorrent},
++    {IPP2P_APPLE,SHORT_HAND_IPP2P,5, &search_apple},
++    {IPP2P_SOUL,SHORT_HAND_IPP2P,5, &search_soul},
++    {IPP2P_WINMX,SHORT_HAND_IPP2P,2, &search_winmx},
++    {IPP2P_ARES,SHORT_HAND_IPP2P,5, &search_ares},
++    {IPP2P_MUTE,SHORT_HAND_NONE,200, &search_mute},
++    {IPP2P_WASTE,SHORT_HAND_NONE,5, &search_waste},
++    {IPP2P_XDCC,SHORT_HAND_NONE,5, &search_xdcc},
++    {0,0,0,NULL}
++};
++
++
++static struct {
++    int command;
++    __u8 short_hand;			/*for functions included in short hands*/
++    int packet_len;
++    int (*function_name) (unsigned char *, int);
++} udp_list[] = {
++    {IPP2P_KAZAA,SHORT_HAND_IPP2P,14, &udp_search_kazaa},
++    {IPP2P_BIT,SHORT_HAND_IPP2P,23, &udp_search_bit},
++    {IPP2P_GNU,SHORT_HAND_IPP2P,11, &udp_search_gnu},
++    {IPP2P_EDK,SHORT_HAND_IPP2P,9, &udp_search_edk},
++    {IPP2P_DC,SHORT_HAND_IPP2P,12, &udp_search_directconnect},
++    {0,0,0,NULL}
++};
++
++
++static int
++match(const struct sk_buff *skb,
++      const struct net_device *in,
++      const struct net_device *out,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++      const struct xt_match *match,
++#endif
++      const void *matchinfo,
++      int offset,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++      unsigned int protoff,
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++      const void *hdr,
++      u_int16_t datalen,
++#endif
++      int *hotdrop)
++{
++    const struct ipt_p2p_info *info = matchinfo;
++    unsigned char  *haystack;
++    struct iphdr *ip = ip_hdr(skb);
++    int p2p_result = 0, i = 0;
++//    int head_len;
++    int hlen = ntohs(ip->tot_len)-(ip->ihl*4);	/*hlen = packet-data length*/
++
++    /*must not be a fragment*/
++    if (offset) {
++	if (info->debug) printk("IPP2P.match: offset found %i \n",offset);
++	return 0;
++    }
++
++    /*make sure that skb is linear*/
++    if(skb_is_nonlinear(skb)){
++	if (info->debug) printk("IPP2P.match: nonlinear skb found\n");
++	return 0;
++    }
++
++
++    haystack=(char *)ip+(ip->ihl*4);		/*haystack = packet data*/
++
++    switch (ip->protocol){
++	case IPPROTO_TCP:		/*what to do with a TCP packet*/
++	{
++	    struct tcphdr *tcph = (void *) ip + ip->ihl * 4;
++
++	    if (tcph->fin) return 0;  /*if FIN bit is set bail out*/
++	    if (tcph->syn) return 0;  /*if SYN bit is set bail out*/
++	    if (tcph->rst) return 0;  /*if RST bit is set bail out*/
++
++	    haystack += tcph->doff * 4; /*get TCP-Header-Size*/
++	    hlen -= tcph->doff * 4;
++	    while (matchlist[i].command) {
++		if ((((info->cmd & matchlist[i].command) == matchlist[i].command) ||
++		    ((info->cmd & matchlist[i].short_hand) == matchlist[i].short_hand)) &&
++		    (hlen > matchlist[i].packet_len)) {
++			    p2p_result = matchlist[i].function_name(haystack, hlen);
++			    if (p2p_result)
++			    {
++				if (info->debug) printk("IPP2P.debug:TCP-match: %i from: %u.%u.%u.%u:%i to: %u.%u.%u.%u:%i Length: %i\n",
++				    p2p_result, NIPQUAD(ip->saddr),ntohs(tcph->source), NIPQUAD(ip->daddr),ntohs(tcph->dest),hlen);
++				return p2p_result;
++    			    }
++    		}
++	    i++;
++	    }
++	    return p2p_result;
++	}
++
++	case IPPROTO_UDP:		/*what to do with an UDP packet*/
++	{
++	    struct udphdr *udph = (void *) ip + ip->ihl * 4;
++
++	    while (udp_list[i].command){
++		if ((((info->cmd & udp_list[i].command) == udp_list[i].command) ||
++		    ((info->cmd & udp_list[i].short_hand) == udp_list[i].short_hand)) &&
++		    (hlen > udp_list[i].packet_len)) {
++			    p2p_result = udp_list[i].function_name(haystack, hlen);
++			    if (p2p_result){
++				if (info->debug) printk("IPP2P.debug:UDP-match: %i from: %u.%u.%u.%u:%i to: %u.%u.%u.%u:%i Length: %i\n",
++				    p2p_result, NIPQUAD(ip->saddr),ntohs(udph->source), NIPQUAD(ip->daddr),ntohs(udph->dest),hlen);
++				return p2p_result;
++			    }
++		}
++	    i++;
++	    }
++	    return p2p_result;
++	}
++
++	default: return 0;
++    }
++}
++
++
++
++static int
++checkentry(const char *tablename,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++	    const void *ip,
++	    const struct xt_match *match,
++#else
++            const struct ipt_ip *ip,
++#endif
++	    void *matchinfo,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++	    unsigned int matchsize,
++#endif
++	    unsigned int hook_mask)
++{
++        /* Must specify -p tcp */
++/*    if (ip->proto != IPPROTO_TCP || (ip->invflags & IPT_INV_PROTO)) {
++ *	printk("ipp2p: Only works on TCP packets, use -p tcp\n");
++ *	return 0;
++ *    }*/
++    return 1;
++}
++
++
++
++
++static struct ipt_match ipp2p_match = {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++	{ NULL, NULL },
++	"ipp2p",
++	&match,
++	&checkentry,
++	NULL,
++	THIS_MODULE
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++	.name		= "ipp2p",
++	.match		= &match,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++	.matchsize	= sizeof(struct ipt_p2p_info),
++#endif
++	.checkentry	= &checkentry,
++	.me		= THIS_MODULE,
++#endif
++};
++
++
++static int __init init(void)
++{
++    printk(KERN_INFO "IPP2P v%s loading\n", IPP2P_VERSION);
++    return xt_register_match(&ipp2p_match);
++}
++
++static void __exit fini(void)
++{
++    xt_unregister_match(&ipp2p_match);
++    printk(KERN_INFO "IPP2P v%s unloaded\n", IPP2P_VERSION);
++}
++
++module_init(init);
++module_exit(fini);
++
++
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/Kconfig linux-2.6.21.1.dev/net/ipv4/netfilter/Kconfig
+--- linux-2.6.21.1.old/net/ipv4/netfilter/Kconfig	2007-05-26 20:17:47.626407992 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/Kconfig	2007-05-26 20:21:54.587864144 +0200
+@@ -81,6 +81,12 @@
+ 	help
+ 	  Say Y to get lots of debugging output.
+ 
++config IP_NF_MATCH_IPP2P
++	tristate "IPP2P"
++	depends on IP_NF_IPTABLES
++	help
++	  Module for matching traffic of various Peer-to-Peer applications
++
+ config IP_NF_MATCH_TOS
+ 	tristate "TOS match support"
+ 	depends on IP_NF_IPTABLES
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/Makefile linux-2.6.21.1.dev/net/ipv4/netfilter/Makefile
+--- linux-2.6.21.1.old/net/ipv4/netfilter/Makefile	2007-05-26 20:17:47.638406168 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/Makefile	2007-05-26 20:21:54.588863992 +0200
+@@ -49,7 +49,7 @@
+ obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
+ obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
+ obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
+-
++obj-$(CONFIG_IP_NF_MATCH_IPP2P) += ipt_ipp2p.o
+ obj-$(CONFIG_IP_NF_MATCH_LAYER7) += ipt_layer7.o
+ 
+ # targets
diff --git a/target/linux/generic-2.6/patches-2.6.22/120-openswan-2.4.0.kernel-2.6-natt.patch b/target/linux/generic-2.6/patches-2.6.22/120-openswan-2.4.0.kernel-2.6-natt.patch
new file mode 100644
index 0000000000..4d9dc9500e
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/120-openswan-2.4.0.kernel-2.6-natt.patch
@@ -0,0 +1,169 @@
+diff -urN linux-2.6.21.1.old/include/net/xfrmudp.h linux-2.6.21.1.dev/include/net/xfrmudp.h
+--- linux-2.6.21.1.old/include/net/xfrmudp.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/net/xfrmudp.h	2007-05-26 20:24:53.933599448 +0200
+@@ -0,0 +1,10 @@
++/*
++ * pointer to function for type that xfrm4_input wants, to permit
++ * decoupling of XFRM from udp.c
++ */
++#define HAVE_XFRM4_UDP_REGISTER
++
++typedef int (*xfrm4_rcv_encap_t)(struct sk_buff *skb, __u16 encap_type);
++extern int udp4_register_esp_rcvencap(xfrm4_rcv_encap_t func
++				      , xfrm4_rcv_encap_t *oldfunc);
++extern int udp4_unregister_esp_rcvencap(xfrm4_rcv_encap_t func);
+diff -urN linux-2.6.21.1.old/net/ipv4/Kconfig linux-2.6.21.1.dev/net/ipv4/Kconfig
+--- linux-2.6.21.1.old/net/ipv4/Kconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/ipv4/Kconfig	2007-05-26 20:24:53.965594584 +0200
+@@ -266,6 +266,12 @@
+ 	  Network), but can be distributed all over the Internet. If you want
+ 	  to do that, say Y here and to "IP multicast routing" below.
+ 
++config IPSEC_NAT_TRAVERSAL
++	bool "IPSEC NAT-Traversal (KLIPS compatible)"
++	depends on INET
++	---help---
++          Includes support for RFC3947/RFC3948 NAT-Traversal of ESP over UDP.
++
+ config IP_MROUTE
+ 	bool "IP: multicast routing"
+ 	depends on IP_MULTICAST
+diff -urN linux-2.6.21.1.old/net/ipv4/udp.c linux-2.6.21.1.dev/net/ipv4/udp.c
+--- linux-2.6.21.1.old/net/ipv4/udp.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/ipv4/udp.c	2007-05-26 20:24:53.966594432 +0200
+@@ -101,12 +101,15 @@
+ #include <net/route.h>
+ #include <net/checksum.h>
+ #include <net/xfrm.h>
++#include <net/xfrmudp.h>
+ #include "udp_impl.h"
+ 
+ /*
+  *	Snmp MIB for the UDP layer
+  */
+ 
++static xfrm4_rcv_encap_t xfrm4_rcv_encap_func;
++
+ DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;
+ 
+ struct hlist_head udp_hash[UDP_HTABLE_SIZE];
+@@ -1008,6 +1011,42 @@
+ 	return 0;
+ }
+ 
++#if defined(CONFIG_XFRM) || defined(CONFIG_IPSEC_NAT_TRAVERSAL)
++
++/* if XFRM isn't a module, then register it directly. */
++#if 0 && !defined(CONFIG_XFRM_MODULE) && !defined(CONFIG_IPSEC_NAT_TRAVERSAL)
++static xfrm4_rcv_encap_t xfrm4_rcv_encap_func = xfrm4_rcv_encap;
++#else
++static xfrm4_rcv_encap_t xfrm4_rcv_encap_func = NULL;
++#endif
++
++int udp4_register_esp_rcvencap(xfrm4_rcv_encap_t func
++			       , xfrm4_rcv_encap_t *oldfunc)
++{
++  if(oldfunc != NULL) {
++    *oldfunc = xfrm4_rcv_encap_func;
++  }
++
++#if 0
++  if(xfrm4_rcv_encap_func != NULL)
++    return -1;
++#endif
++
++  xfrm4_rcv_encap_func = func;
++  return 0;
++}
++
++int udp4_unregister_esp_rcvencap(xfrm4_rcv_encap_t func)
++{
++  if(xfrm4_rcv_encap_func != func)
++    return -1;
++
++  xfrm4_rcv_encap_func = NULL;
++  return 0;
++}
++#endif /* CONFIG_XFRM_MODULE || CONFIG_IPSEC_NAT_TRAVERSAL */
++
++
+ /* return:
+  * 	1  if the UDP system should process it
+  *	0  if we should drop this packet
+@@ -1015,7 +1054,7 @@
+  */
+ static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
+ {
+-#ifndef CONFIG_XFRM
++#if !defined(CONFIG_XFRM) && !defined(CONFIG_IPSEC_NAT_TRAVERSAL)
+ 	return 1;
+ #else
+ 	struct udp_sock *up = udp_sk(sk);
+@@ -1030,11 +1069,11 @@
+ 	/* if we're overly short, let UDP handle it */
+ 	len = skb->len - sizeof(struct udphdr);
+ 	if (len <= 0)
+-		return 1;
++		return 2;
+ 
+ 	/* if this is not encapsulated socket, then just return now */
+ 	if (!encap_type)
+-		return 1;
++		return 3;
+ 
+ 	/* If this is a paged skb, make sure we pull up
+ 	 * whatever data we need to look at. */
+@@ -1057,7 +1096,7 @@
+ 			len = sizeof(struct udphdr);
+ 		} else
+ 			/* Must be an IKE packet.. pass it through */
+-			return 1;
++			return 4;
+ 		break;
+ 	case UDP_ENCAP_ESPINUDP_NON_IKE:
+ 		/* Check if this is a keepalive packet.  If so, eat it. */
+@@ -1070,7 +1109,7 @@
+ 			len = sizeof(struct udphdr) + 2 * sizeof(u32);
+ 		} else
+ 			/* Must be an IKE packet.. pass it through */
+-			return 1;
++			return 5;
+ 		break;
+ 	}
+ 
+@@ -1081,6 +1120,8 @@
+ 	 */
+ 	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ 		return 0;
++	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
++		return 0;
+ 
+ 	/* Now we can update and verify the packet length... */
+ 	iph = ip_hdr(skb);
+@@ -1145,9 +1186,13 @@
+ 			return 0;
+ 		}
+ 		if (ret < 0) {
+-			/* process the ESP packet */
+-			ret = xfrm4_rcv_encap(skb, up->encap_type);
+-			UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
++			if(xfrm4_rcv_encap_func != NULL) {
++			  ret = (*xfrm4_rcv_encap_func)(skb, up->encap_type);
++			  UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
++			} else {
++			  UDP_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag);
++			  ret = 1;
++			}
+ 			return -ret;
+ 		}
+ 		/* FALLTHROUGH -- it's a UDP Packet */
+@@ -1847,3 +1892,9 @@
+ EXPORT_SYMBOL(udp_proc_register);
+ EXPORT_SYMBOL(udp_proc_unregister);
+ #endif
++
++#if defined(CONFIG_IPSEC_NAT_TRAVERSAL)
++EXPORT_SYMBOL(udp4_register_esp_rcvencap);
++EXPORT_SYMBOL(udp4_unregister_esp_rcvencap);
++#endif
++
diff --git a/target/linux/generic-2.6/patches-2.6.22/130-netfilter-ipset.patch b/target/linux/generic-2.6/patches-2.6.22/130-netfilter-ipset.patch
new file mode 100644
index 0000000000..f52d0ea730
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/130-netfilter-ipset.patch
@@ -0,0 +1,6625 @@
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set.h	2007-06-08 16:29:31.825808000 -0500
+@@ -0,0 +1,498 @@
++#ifndef _IP_SET_H
++#define _IP_SET_H
++
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ *                         Patrick Schaaf <bof@bof.de>
++ *                         Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++#if 0
++#define IP_SET_DEBUG
++#endif
++
++/*
++ * A sockopt of such quality has hardly ever been seen before on the open
++ * market!  This little beauty, hardly ever used: above 64, so it's
++ * traditionally used for firewalling, not touched (even once!) by the
++ * 2.0, 2.2 and 2.4 kernels!
++ *
++ * Comes with its own certificate of authenticity, valid anywhere in the
++ * Free world!
++ *
++ * Rusty, 19.4.2000
++ */
++#define SO_IP_SET 		83
++
++/*
++ * Heavily modified by Joakim Axelsson 08.03.2002
++ * - Made it more module-based
++ *
++ * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
++ * - bindings added
++ * - in order to "deal with" backward compatibility, renamed to ipset
++ */
++
++/* 
++ * Used so that the kernel module and ipset-binary can match their versions 
++ */
++#define IP_SET_PROTOCOL_VERSION 2
++
++#define IP_SET_MAXNAMELEN 32	/* set names and set typenames */
++
++/* Let's work with our own typedef for representing an IP address.
++ * We hope to make the code more portable, possibly to IPv6...
++ *
++ * The representation works in HOST byte order, because most set types
++ * will perform arithmetic operations and compare operations.
++ * 
++ * For now the type is an uint32_t.
++ *
++ * Make sure to ONLY use the functions when translating and parsing
++ * in order to keep the host byte order and make it more portable:
++ *  parse_ip()
++ *  parse_mask()
++ *  parse_ipandmask()
++ *  ip_tostring()
++ * (Joakim: where are they???)
++ */
++
++typedef uint32_t ip_set_ip_t;
++
++/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
++ * and IP_SET_INVALID_ID if you want to increase the max number of sets.
++ */
++typedef uint16_t ip_set_id_t;
++
++#define IP_SET_INVALID_ID	65535
++
++/* How deep we follow bindings */
++#define IP_SET_MAX_BINDINGS	6
++
++/*
++ * Option flags for kernel operations (ipt_set_info)
++ */
++#define IPSET_SRC 		0x01	/* Source match/add */
++#define IPSET_DST		0x02	/* Destination match/add */
++#define IPSET_MATCH_INV		0x04	/* Inverse matching */
++
++/*
++ * Set features
++ */
++#define IPSET_TYPE_IP		0x01	/* IP address type of set */
++#define IPSET_TYPE_PORT		0x02	/* Port type of set */
++#define IPSET_DATA_SINGLE	0x04	/* Single data storage */
++#define IPSET_DATA_DOUBLE	0x08	/* Double data storage */
++
++/* Reserved keywords */
++#define IPSET_TOKEN_DEFAULT	":default:"
++#define IPSET_TOKEN_ALL		":all:"
++
++/* SO_IP_SET operation constants, and their request struct types.
++ *
++ * Operation ids:
++ *	  0-99:	 commands with version checking
++ *	100-199: add/del/test/bind/unbind
++ *	200-299: list, save, restore
++ */
++
++/* Single shot operations: 
++ * version, create, destroy, flush, rename and swap 
++ *
++ * Sets are identified by name.
++ */
++
++#define IP_SET_REQ_STD		\
++	unsigned op;		\
++	unsigned version;	\
++	char name[IP_SET_MAXNAMELEN]
++
++#define IP_SET_OP_CREATE	0x00000001	/* Create a new (empty) set */
++struct ip_set_req_create {
++	IP_SET_REQ_STD;
++	char typename[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_OP_DESTROY	0x00000002	/* Remove a (empty) set */
++struct ip_set_req_std {
++	IP_SET_REQ_STD;
++};
++
++#define IP_SET_OP_FLUSH		0x00000003	/* Remove all IPs in a set */
++/* Uses ip_set_req_std */
++
++#define IP_SET_OP_RENAME	0x00000004	/* Rename a set */
++/* Uses ip_set_req_create */
++
++#define IP_SET_OP_SWAP		0x00000005	/* Swap two sets */
++/* Uses ip_set_req_create */
++
++union ip_set_name_index {
++	char name[IP_SET_MAXNAMELEN];
++	ip_set_id_t index;
++};
++
++#define IP_SET_OP_GET_BYNAME	0x00000006	/* Get set index by name */
++struct ip_set_req_get_set {
++	unsigned op;
++	unsigned version;
++	union ip_set_name_index set;
++};
++
++#define IP_SET_OP_GET_BYINDEX	0x00000007	/* Get set name by index */
++/* Uses ip_set_req_get_set */
++
++#define IP_SET_OP_VERSION	0x00000100	/* Ask kernel version */
++struct ip_set_req_version {
++	unsigned op;
++	unsigned version;
++};
++
++/* Double shot operations: 
++ * add, del, test, bind and unbind.
++ *
++ * First we query the kernel to get the index and type of the target set,
++ * then issue the command. Validity of IP is checked in kernel in order
++ * to minimize sockopt operations.
++ */
++
++/* Get minimal set data for add/del/test/bind/unbind IP */
++#define IP_SET_OP_ADT_GET	0x00000010	/* Get set and type */
++struct ip_set_req_adt_get {
++	unsigned op;
++	unsigned version;
++	union ip_set_name_index set;
++	char typename[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_REQ_BYINDEX	\
++	unsigned op;		\
++	ip_set_id_t index;
++
++struct ip_set_req_adt {
++	IP_SET_REQ_BYINDEX;
++};
++
++#define IP_SET_OP_ADD_IP	0x00000101	/* Add an IP to a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_DEL_IP	0x00000102	/* Remove an IP from a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_TEST_IP	0x00000103	/* Test an IP in a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_BIND_SET	0x00000104	/* Bind an IP to a set */
++/* Uses ip_set_req_bind, with type specific addage */
++struct ip_set_req_bind {
++	IP_SET_REQ_BYINDEX;
++	char binding[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_OP_UNBIND_SET	0x00000105	/* Unbind an IP from a set */
++/* Uses ip_set_req_bind, with type specific addage 
++ * index = 0 means unbinding for all sets */
++
++#define IP_SET_OP_TEST_BIND_SET	0x00000106	/* Test binding an IP to a set */
++/* Uses ip_set_req_bind, with type specific addage */
++
++/* Multiple shot operations: list, save, restore.
++ *
++ * - check kernel version and query the max number of sets
++ * - get the basic information on all sets
++ *   and size required for the next step
++ * - get actual set data: header, data, bindings
++ */
++
++/* Get max_sets and the index of a queried set
++ */
++#define IP_SET_OP_MAX_SETS	0x00000020
++struct ip_set_req_max_sets {
++	unsigned op;
++	unsigned version;
++	ip_set_id_t max_sets;		/* max_sets */
++	ip_set_id_t sets;		/* real number of sets */
++	union ip_set_name_index set;	/* index of set if name used */
++};
++
++/* Get the id and name of the sets plus size for next step */
++#define IP_SET_OP_LIST_SIZE	0x00000201
++#define IP_SET_OP_SAVE_SIZE	0x00000202
++struct ip_set_req_setnames {
++	unsigned op;
++	ip_set_id_t index;		/* set to list/save */
++	size_t size;			/* size to get setdata/bindings */
++	/* followed by sets number of struct ip_set_name_list */
++};
++
++struct ip_set_name_list {
++	char name[IP_SET_MAXNAMELEN];
++	char typename[IP_SET_MAXNAMELEN];
++	ip_set_id_t index;
++	ip_set_id_t id;
++};
++
++/* The actual list operation */
++#define IP_SET_OP_LIST		0x00000203
++struct ip_set_req_list {
++	IP_SET_REQ_BYINDEX;
++	/* sets number of struct ip_set_list in reply */ 
++};
++
++struct ip_set_list {
++	ip_set_id_t index;
++	ip_set_id_t binding;
++	u_int32_t ref;
++	size_t header_size;	/* Set header data of header_size */
++	size_t members_size;	/* Set members data of members_size */
++	size_t bindings_size;	/* Set bindings data of bindings_size */
++};
++
++struct ip_set_hash_list {
++	ip_set_ip_t ip;
++	ip_set_id_t binding;
++};
++
++/* The save operation */
++#define IP_SET_OP_SAVE		0x00000204
++/* Uses ip_set_req_list, in the reply replaced by
++ * sets number of struct ip_set_save plus a marker
++ * ip_set_save followed by ip_set_hash_save structures.
++ */
++struct ip_set_save {
++	ip_set_id_t index;
++	ip_set_id_t binding;
++	size_t header_size;	/* Set header data of header_size */
++	size_t members_size;	/* Set members data of members_size */
++};
++
++/* At restoring, ip == 0 means default binding for the given set: */
++struct ip_set_hash_save {
++	ip_set_ip_t ip;
++	ip_set_id_t id;
++	ip_set_id_t binding;
++};
++
++/* The restore operation */
++#define IP_SET_OP_RESTORE	0x00000205
++/* Uses ip_set_req_setnames followed by ip_set_restore structures
++ * plus a marker ip_set_restore, followed by ip_set_hash_save 
++ * structures.
++ */
++struct ip_set_restore {
++	char name[IP_SET_MAXNAMELEN];
++	char typename[IP_SET_MAXNAMELEN];
++	ip_set_id_t index;
++	size_t header_size;	/* Create data of header_size */
++	size_t members_size;	/* Set members data of members_size */
++};
++
++static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
++{
++	return 4 * ((((b - a + 8) / 8) + 3) / 4);
++}
++
++#ifdef __KERNEL__
++
++#define ip_set_printk(format, args...) 			\
++	do {							\
++		printk("%s: %s: ", __FILE__, __FUNCTION__);	\
++		printk(format "\n" , ## args);			\
++	} while (0)
++
++#if defined(IP_SET_DEBUG)
++#define DP(format, args...) 					\
++	do {							\
++		printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
++		printk(format "\n" , ## args);			\
++	} while (0)
++#define IP_SET_ASSERT(x)					\
++	do {							\
++		if (!(x))					\
++			printk("IP_SET_ASSERT: %s:%i(%s)\n",	\
++				__FILE__, __LINE__, __FUNCTION__); \
++	} while (0)
++#else
++#define DP(format, args...)
++#define IP_SET_ASSERT(x)
++#endif
++
++struct ip_set;
++
++/*
++ * The ip_set_type definition - one per set type, e.g. "ipmap".
++ *
++ * Each individual set has a pointer, set->type, going to one
++ * of these structures. Function pointers inside the structure implement
++ * the real behaviour of the sets.
++ *
++ * If not mentioned differently, the implementation behind the function
++ * pointers of a set_type is expected to return 0 if ok, and a negative
++ * errno (e.g. -EINVAL) on error.
++ */
++struct ip_set_type {
++	struct list_head list;	/* next in list of set types */
++
++	/* test for IP in set (kernel: iptables -m set src|dst)
++	 * return 0 if not in set, 1 if in set.
++	 */
++	int (*testip_kernel) (struct ip_set *set,
++			      const struct sk_buff * skb, 
++			      ip_set_ip_t *ip,
++			      const u_int32_t *flags,
++			      unsigned char index);
++
++	/* test for IP in set (userspace: ipset -T set IP)
++	 * return 0 if not in set, 1 if in set.
++	 */
++	int (*testip) (struct ip_set *set,
++		       const void *data, size_t size,
++		       ip_set_ip_t *ip);
++
++	/*
++	 * Size of the data structure passed in when
++	 * adding/deleting/testing an entry.
++	 */
++	size_t reqsize;
++
++	/* Add IP into set (userspace: ipset -A set IP)
++	 * Return -EEXIST if the address is already in the set,
++	 * and -ERANGE if the address lies outside the set bounds.
++	 * If the address was not already in the set, 0 is returned.
++	 */
++	int (*addip) (struct ip_set *set, 
++		      const void *data, size_t size,
++		      ip_set_ip_t *ip);
++
++	/* Add IP into set (kernel: iptables ... -j SET set src|dst)
++	 * Return -EEXIST if the address is already in the set,
++	 * and -ERANGE if the address lies outside the set bounds.
++	 * If the address was not already in the set, 0 is returned.
++	 */
++	int (*addip_kernel) (struct ip_set *set,
++			     const struct sk_buff * skb, 
++			     ip_set_ip_t *ip,
++			     const u_int32_t *flags,
++			     unsigned char index);
++
++	/* remove IP from set (userspace: ipset -D set --entry x)
++	 * Return -EEXIST if the address is NOT in the set,
++	 * and -ERANGE if the address lies outside the set bounds.
++	 * If the address really was in the set, 0 is returned.
++	 */
++	int (*delip) (struct ip_set *set, 
++		      const void *data, size_t size,
++		      ip_set_ip_t *ip);
++
++	/* remove IP from set (kernel: iptables ... -j SET --entry x)
++	 * Return -EEXIST if the address is NOT in the set,
++	 * and -ERANGE if the address lies outside the set bounds.
++	 * If the address really was in the set, 0 is returned.
++	 */
++	int (*delip_kernel) (struct ip_set *set,
++			     const struct sk_buff * skb, 
++			     ip_set_ip_t *ip,
++			     const u_int32_t *flags,
++			     unsigned char index);
++
++	/* new set creation - allocated type specific items
++	 */
++	int (*create) (struct ip_set *set,
++		       const void *data, size_t size);
++
++	/* retry the operation after successfully tweaking the set
++	 */
++	int (*retry) (struct ip_set *set);
++
++	/* set destruction - free type specific items
++	 * There is no return value.
++	 * Can be called only when child sets are destroyed.
++	 */
++	void (*destroy) (struct ip_set *set);
++
++	/* set flushing - reset all bits in the set, or something similar.
++	 * There is no return value.
++	 */
++	void (*flush) (struct ip_set *set);
++
++	/* Listing: size needed for header
++	 */
++	size_t header_size;
++
++	/* Listing: Get the header
++	 *
++	 * Fill in the information in "data".
++	 * This function is always run after list_header_size() under a 
++	 * writelock on the set. Therefore the length of "data" is always 
++	 * correct. 
++	 */
++	void (*list_header) (const struct ip_set *set, 
++			     void *data);
++
++	/* Listing: Get the size for the set members
++	 */
++	int (*list_members_size) (const struct ip_set *set);
++
++	/* Listing: Get the set members
++	 *
++	 * Fill in the information in "data".
++	 * This function is always run after list_member_size() under a 
++	 * writelock on the set. Therefore the length of "data" is always 
++	 * correct. 
++	 */
++	void (*list_members) (const struct ip_set *set,
++			      void *data);
++
++	char typename[IP_SET_MAXNAMELEN];
++	unsigned char features;
++	int protocol_version;
++
++	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
++	struct module *me;
++};
++
++extern int ip_set_register_set_type(struct ip_set_type *set_type);
++extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
++
++/* A generic ipset */
++struct ip_set {
++	char name[IP_SET_MAXNAMELEN];	/* the name of the set */
++	rwlock_t lock;			/* lock for concurrency control */
++	ip_set_id_t id;			/* set id for swapping */
++	ip_set_id_t binding;		/* default binding for the set */
++	atomic_t ref;			/* in kernel and in hash references */
++	struct ip_set_type *type; 	/* the set types */
++	void *data;			/* pooltype specific data */
++};
++
++/* Structure to bind set elements to sets */
++struct ip_set_hash {
++	struct list_head list;		/* list of clashing entries in hash */
++	ip_set_ip_t ip;			/* ip from set */
++	ip_set_id_t id;			/* set id */
++	ip_set_id_t binding;		/* set we bind the element to */
++};
++
++/* register and unregister set references */
++extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
++extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
++extern void ip_set_put(ip_set_id_t id);
++
++/* API for iptables set match, and SET target */
++extern void ip_set_addip_kernel(ip_set_id_t id,
++				const struct sk_buff *skb,
++				const u_int32_t *flags);
++extern void ip_set_delip_kernel(ip_set_id_t id,
++				const struct sk_buff *skb,
++				const u_int32_t *flags);
++extern int ip_set_testip_kernel(ip_set_id_t id,
++				const struct sk_buff *skb,
++				const u_int32_t *flags);
++
++#endif				/* __KERNEL__ */
++
++#endif /*_IP_SET_H*/
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iphash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iphash.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iphash.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iphash.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,30 @@
++#ifndef __IP_SET_IPHASH_H
++#define __IP_SET_IPHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "iphash"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_iphash {
++	ip_set_ip_t *members;		/* the iphash proper */
++	uint32_t elements;		/* number of elements */
++	uint32_t hashsize;		/* hash size */
++	uint16_t probes;		/* max number of probes  */
++	uint16_t resize;		/* resize factor in percent */
++	ip_set_ip_t netmask;		/* netmask */
++	void *initval[0];		/* initvals for jhash_1word */
++};
++
++struct ip_set_req_iphash_create {
++	uint32_t hashsize;
++	uint16_t probes;
++	uint16_t resize;
++	ip_set_ip_t netmask;
++};
++
++struct ip_set_req_iphash {
++	ip_set_ip_t ip;
++};
++
++#endif	/* __IP_SET_IPHASH_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipmap.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipmap.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipmap.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipmap.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,56 @@
++#ifndef __IP_SET_IPMAP_H
++#define __IP_SET_IPMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "ipmap"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_ipmap {
++	void *members;			/* the ipmap proper */
++	ip_set_ip_t first_ip;		/* host byte order, included in range */
++	ip_set_ip_t last_ip;		/* host byte order, included in range */
++	ip_set_ip_t netmask;		/* subnet netmask */
++	ip_set_ip_t sizeid;		/* size of set in IPs */
++	ip_set_ip_t hosts;		/* number of hosts in a subnet */
++};
++
++struct ip_set_req_ipmap_create {
++	ip_set_ip_t from;
++	ip_set_ip_t to;
++	ip_set_ip_t netmask;
++};
++
++struct ip_set_req_ipmap {
++	ip_set_ip_t ip;
++};
++
++unsigned int
++mask_to_bits(ip_set_ip_t mask)
++{
++	unsigned int bits = 32;
++	ip_set_ip_t maskaddr;
++	
++	if (mask == 0xFFFFFFFF)
++		return bits;
++	
++	maskaddr = 0xFFFFFFFE;
++	while (--bits >= 0 && maskaddr != mask)
++		maskaddr <<= 1;
++	
++	return bits;
++}
++
++ip_set_ip_t
++range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
++{
++	ip_set_ip_t mask = 0xFFFFFFFE;
++	
++	*bits = 32;
++	while (--(*bits) >= 0 && mask && (to & mask) != from)
++		mask <<= 1;
++		
++	return mask;
++}
++	
++#endif /* __IP_SET_IPMAP_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipporthash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipporthash.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipporthash.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipporthash.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,34 @@
++#ifndef __IP_SET_IPPORTHASH_H
++#define __IP_SET_IPPORTHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "ipporthash"
++#define MAX_RANGE 0x0000FFFF
++#define INVALID_PORT	(MAX_RANGE + 1)
++
++struct ip_set_ipporthash {
++	ip_set_ip_t *members;		/* the ipporthash proper */
++	uint32_t elements;		/* number of elements */
++	uint32_t hashsize;		/* hash size */
++	uint16_t probes;		/* max number of probes  */
++	uint16_t resize;		/* resize factor in percent */
++	ip_set_ip_t first_ip;		/* host byte order, included in range */
++	ip_set_ip_t last_ip;		/* host byte order, included in range */
++	void *initval[0];		/* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipporthash_create {
++	uint32_t hashsize;
++	uint16_t probes;
++	uint16_t resize;
++	ip_set_ip_t from;
++	ip_set_ip_t to;
++};
++
++struct ip_set_req_ipporthash {
++	ip_set_ip_t ip;
++	ip_set_ip_t port;
++};
++
++#endif	/* __IP_SET_IPPORTHASH_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iptree.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iptree.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iptree.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iptree.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,40 @@
++#ifndef __IP_SET_IPTREE_H
++#define __IP_SET_IPTREE_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "iptree"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_iptreed {
++	unsigned long expires[256];	   	/* x.x.x.ADDR */
++};
++
++struct ip_set_iptreec {
++	struct ip_set_iptreed *tree[256];	/* x.x.ADDR.* */
++};
++
++struct ip_set_iptreeb {
++	struct ip_set_iptreec *tree[256];	/* x.ADDR.*.* */
++};
++
++struct ip_set_iptree {
++	unsigned int timeout;
++	unsigned int gc_interval;
++#ifdef __KERNEL__
++	uint32_t elements;		/* number of elements */
++	struct timer_list gc;
++	struct ip_set_iptreeb *tree[256];	/* ADDR.*.*.* */
++#endif
++};
++
++struct ip_set_req_iptree_create {
++	unsigned int timeout;
++};
++
++struct ip_set_req_iptree {
++	ip_set_ip_t ip;
++	unsigned int timeout;
++};
++
++#endif	/* __IP_SET_IPTREE_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_jhash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_jhash.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_jhash.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_jhash.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,148 @@
++#ifndef _LINUX_IPSET_JHASH_H
++#define _LINUX_IPSET_JHASH_H
++
++/* This is a copy of linux/jhash.h but the types u32/u8 are changed
++ * to __u32/__u8 so that the header file can be included into
++ * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
++ */
++
++/* jhash.h: Jenkins hash support.
++ *
++ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
++ *
++ * http://burtleburtle.net/bob/hash/
++ *
++ * These are the credits from Bob's sources:
++ *
++ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
++ * hash(), hash2(), hash3, and mix() are externally useful functions.
++ * Routines to test the hash are included if SELF_TEST is defined.
++ * You can use this free for any purpose.  It has no warranty.
++ *
++ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
++ *
++ * I've modified Bob's hash to be useful in the Linux kernel, and
++ * any bugs present are surely my fault.  -DaveM
++ */
++
++/* NOTE: Arguments are modified. */
++#define __jhash_mix(a, b, c) \
++{ \
++  a -= b; a -= c; a ^= (c>>13); \
++  b -= c; b -= a; b ^= (a<<8); \
++  c -= a; c -= b; c ^= (b>>13); \
++  a -= b; a -= c; a ^= (c>>12);  \
++  b -= c; b -= a; b ^= (a<<16); \
++  c -= a; c -= b; c ^= (b>>5); \
++  a -= b; a -= c; a ^= (c>>3);  \
++  b -= c; b -= a; b ^= (a<<10); \
++  c -= a; c -= b; c ^= (b>>15); \
++}
++
++/* The golden ratio: an arbitrary value */
++#define JHASH_GOLDEN_RATIO	0x9e3779b9
++
++/* The most generic version, hashes an arbitrary sequence
++ * of bytes.  No alignment or length assumptions are made about
++ * the input key.
++ */
++static inline __u32 jhash(void *key, __u32 length, __u32 initval)
++{
++	__u32 a, b, c, len;
++	__u8 *k = key;
++
++	len = length;
++	a = b = JHASH_GOLDEN_RATIO;
++	c = initval;
++
++	while (len >= 12) {
++		a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
++		b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
++		c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
++
++		__jhash_mix(a,b,c);
++
++		k += 12;
++		len -= 12;
++	}
++
++	c += length;
++	switch (len) {
++	case 11: c += ((__u32)k[10]<<24);
++	case 10: c += ((__u32)k[9]<<16);
++	case 9 : c += ((__u32)k[8]<<8);
++	case 8 : b += ((__u32)k[7]<<24);
++	case 7 : b += ((__u32)k[6]<<16);
++	case 6 : b += ((__u32)k[5]<<8);
++	case 5 : b += k[4];
++	case 4 : a += ((__u32)k[3]<<24);
++	case 3 : a += ((__u32)k[2]<<16);
++	case 2 : a += ((__u32)k[1]<<8);
++	case 1 : a += k[0];
++	};
++
++	__jhash_mix(a,b,c);
++
++	return c;
++}
++
++/* A special optimized version that handles 1 or more of __u32s.
++ * The length parameter here is the number of __u32s in the key.
++ */
++static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
++{
++	__u32 a, b, c, len;
++
++	a = b = JHASH_GOLDEN_RATIO;
++	c = initval;
++	len = length;
++
++	while (len >= 3) {
++		a += k[0];
++		b += k[1];
++		c += k[2];
++		__jhash_mix(a, b, c);
++		k += 3; len -= 3;
++	}
++
++	c += length * 4;
++
++	switch (len) {
++	case 2 : b += k[1];
++	case 1 : a += k[0];
++	};
++
++	__jhash_mix(a,b,c);
++
++	return c;
++}
++
++
++/* Special ultra-optimized versions that know they are hashing exactly
++ * 3, 2 or 1 word(s).
++ *
++ * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
++ *       done at the end is not done here.
++ */
++static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
++{
++	a += JHASH_GOLDEN_RATIO;
++	b += JHASH_GOLDEN_RATIO;
++	c += initval;
++
++	__jhash_mix(a, b, c);
++
++	return c;
++}
++
++static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
++{
++	return jhash_3words(a, b, 0, initval);
++}
++
++static inline __u32 jhash_1word(__u32 a, __u32 initval)
++{
++	return jhash_3words(a, 0, 0, initval);
++}
++
++#endif /* _LINUX_IPSET_JHASH_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_macipmap.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_macipmap.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_macipmap.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_macipmap.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,38 @@
++#ifndef __IP_SET_MACIPMAP_H
++#define __IP_SET_MACIPMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "macipmap"
++#define MAX_RANGE 0x0000FFFF
++
++/* general flags */
++#define IPSET_MACIP_MATCHUNSET	1
++
++/* per ip flags */
++#define IPSET_MACIP_ISSET	1
++
++struct ip_set_macipmap {
++	void *members;			/* the macipmap proper */
++	ip_set_ip_t first_ip;		/* host byte order, included in range */
++	ip_set_ip_t last_ip;		/* host byte order, included in range */
++	u_int32_t flags;
++};
++
++struct ip_set_req_macipmap_create {
++	ip_set_ip_t from;
++	ip_set_ip_t to;
++	u_int32_t flags;
++};
++
++struct ip_set_req_macipmap {
++	ip_set_ip_t ip;
++	unsigned char ethernet[ETH_ALEN];
++};
++
++struct ip_set_macip {
++	unsigned short flags;
++	unsigned char ethernet[ETH_ALEN];
++};
++
++#endif	/* __IP_SET_MACIPMAP_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_malloc.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_malloc.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_malloc.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_malloc.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,116 @@
++#ifndef _IP_SET_MALLOC_H
++#define _IP_SET_MALLOC_H
++
++#ifdef __KERNEL__
++
++/* Memory allocation and deallocation */
++static size_t max_malloc_size = 0;
++
++static inline void init_max_malloc_size(void)
++{
++#define CACHE(x) max_malloc_size = x;
++#include <linux/kmalloc_sizes.h>
++#undef CACHE
++}
++
++static inline void * ip_set_malloc(size_t bytes)
++{
++	if (bytes > max_malloc_size)
++		return vmalloc(bytes);
++	else
++		return kmalloc(bytes, GFP_KERNEL);
++}
++
++static inline void ip_set_free(void * data, size_t bytes)
++{
++	if (bytes > max_malloc_size)
++		vfree(data);
++	else
++		kfree(data);
++}
++
++struct harray {
++	size_t max_elements;
++	void *arrays[0];
++};
++
++static inline void * 
++harray_malloc(size_t hashsize, size_t typesize, int flags)
++{
++	struct harray *harray;
++	size_t max_elements, size, i, j;
++
++	if (!max_malloc_size)
++		init_max_malloc_size();
++
++	if (typesize > max_malloc_size)
++		return NULL;
++
++	max_elements = max_malloc_size/typesize;
++	size = hashsize/max_elements;
++	if (hashsize % max_elements)
++		size++;
++	
++	/* Last pointer signals end of arrays */
++	harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
++			 flags);
++
++	if (!harray)
++		return NULL;
++	
++	for (i = 0; i < size - 1; i++) {
++		harray->arrays[i] = kmalloc(max_elements * typesize, flags);
++		if (!harray->arrays[i])
++			goto undo;
++		memset(harray->arrays[i], 0, max_elements * typesize);
++	}
++	harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize, 
++				    flags);
++	if (!harray->arrays[i])
++		goto undo;
++	memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
++
++	harray->max_elements = max_elements;
++	harray->arrays[size] = NULL;
++	
++	return (void *)harray;
++
++    undo:
++    	for (j = 0; j < i; j++) {
++    		kfree(harray->arrays[j]);
++    	}
++    	kfree(harray);
++    	return NULL;
++}
++
++static inline void harray_free(void *h)
++{
++	struct harray *harray = (struct harray *) h;
++	size_t i;
++	
++    	for (i = 0; harray->arrays[i] != NULL; i++)
++    		kfree(harray->arrays[i]);
++    	kfree(harray);
++}
++
++static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
++{
++	struct harray *harray = (struct harray *) h;
++	size_t i;
++	
++    	for (i = 0; harray->arrays[i+1] != NULL; i++)
++		memset(harray->arrays[i], 0, harray->max_elements * typesize);
++	memset(harray->arrays[i], 0, 
++	       (hashsize - i * harray->max_elements) * typesize);
++}
++
++#define HARRAY_ELEM(h, type, which)				\
++({								\
++	struct harray *__h = (struct harray *)(h);		\
++	((type)((__h)->arrays[(which)/(__h)->max_elements])	\
++		+ (which)%(__h)->max_elements);			\
++})
++
++#endif				/* __KERNEL__ */
++
++#endif /*_IP_SET_MALLOC_H*/
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_nethash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_nethash.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_nethash.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_nethash.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,55 @@
++#ifndef __IP_SET_NETHASH_H
++#define __IP_SET_NETHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "nethash"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_nethash {
++	ip_set_ip_t *members;		/* the nethash proper */
++	uint32_t elements;		/* number of elements */
++	uint32_t hashsize;		/* hash size */
++	uint16_t probes;		/* max number of probes  */
++	uint16_t resize;		/* resize factor in percent */
++	unsigned char cidr[30];		/* CIDR sizes */
++	void *initval[0];		/* initvals for jhash_1word */
++};
++
++struct ip_set_req_nethash_create {
++	uint32_t hashsize;
++	uint16_t probes;
++	uint16_t resize;
++};
++
++struct ip_set_req_nethash {
++	ip_set_ip_t ip;
++	unsigned char cidr;
++};
++
++static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
++
++static inline ip_set_ip_t 
++pack(ip_set_ip_t ip, unsigned char cidr)
++{
++	ip_set_ip_t addr, *paddr = &addr;
++	unsigned char n, t, *a;
++
++	addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
++#ifdef __KERNEL__
++	DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
++#endif
++	n = cidr / 8;
++	t = cidr % 8;	
++	a = &((unsigned char *)paddr)[n];
++	*a = *a /(1 << (8 - t)) + shifts[t];
++#ifdef __KERNEL__
++	DP("n: %u, t: %u, a: %u", n, t, *a);
++	DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
++	   HIPQUAD(ip), cidr, NIPQUAD(addr));
++#endif
++
++	return ntohl(addr);
++}
++
++#endif	/* __IP_SET_NETHASH_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_portmap.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_portmap.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_portmap.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_portmap.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,25 @@
++#ifndef __IP_SET_PORTMAP_H
++#define __IP_SET_PORTMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME	"portmap"
++#define MAX_RANGE	0x0000FFFF
++#define INVALID_PORT	(MAX_RANGE + 1)
++
++struct ip_set_portmap {
++	void *members;			/* the portmap proper */
++	ip_set_ip_t first_port;		/* host byte order, included in range */
++	ip_set_ip_t last_port;		/* host byte order, included in range */
++};
++
++struct ip_set_req_portmap_create {
++	ip_set_ip_t from;
++	ip_set_ip_t to;
++};
++
++struct ip_set_req_portmap {
++	ip_set_ip_t port;
++};
++
++#endif /* __IP_SET_PORTMAP_H */
+diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ipt_set.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ipt_set.h
+--- linux-2.6.21.1/include/linux/netfilter_ipv4/ipt_set.h	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ipt_set.h	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,21 @@
++#ifndef _IPT_SET_H
++#define _IPT_SET_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++struct ipt_set_info {
++	ip_set_id_t index;
++	u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
++};
++
++/* match info */
++struct ipt_set_info_match {
++	struct ipt_set_info match_set;
++};
++
++struct ipt_set_info_target {
++	struct ipt_set_info add_set;
++	struct ipt_set_info del_set;
++};
++
++#endif /*_IPT_SET_H*/
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set.c	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,2001 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ *                         Patrick Schaaf <bof@bof.de>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module for IP set management */
++
++#include <linux/version.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/kmod.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/random.h>
++#include <linux/jhash.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <asm/semaphore.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++
++#define ASSERT_READ_LOCK(x)
++#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter_ipv4/ip_set.h>
++
++static struct list_head set_type_list;		/* all registered sets */
++static struct ip_set **ip_set_list;		/* all individual sets */
++static DEFINE_RWLOCK(ip_set_lock);		/* protects the lists and the hash */
++static DECLARE_MUTEX(ip_set_app_mutex);		/* serializes user access */
++static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
++static ip_set_id_t ip_set_bindings_hash_size =  CONFIG_IP_NF_SET_HASHSIZE;
++static struct list_head *ip_set_hash;		/* hash of bindings */
++static unsigned int ip_set_hash_random;		/* random seed */
++
++/*
++ * Sets are identified either by the index in ip_set_list or by id.
++ * The id never changes and is used to find a key in the hash. 
++ * The index may change by swapping and is used at all other places 
++ * (set/SET netfilter modules, binding value, etc.)
++ *
++ * Userspace requests are serialized by ip_set_mutex and sets can
++ * be deleted only from userspace. Therefore ip_set_list locking 
++ * must obey the following rules:
++ *
++ * - kernel requests: read and write locking mandatory
++ * - user requests: read locking optional, write locking mandatory
++ */
++
++static inline void
++__ip_set_get(ip_set_id_t index)
++{
++	atomic_inc(&ip_set_list[index]->ref);
++}
++
++static inline void
++__ip_set_put(ip_set_id_t index)
++{
++	atomic_dec(&ip_set_list[index]->ref);
++}
++
++/*
++ * Binding routines
++ */
++
++static inline struct ip_set_hash *
++__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
++{
++	struct ip_set_hash *set_hash;
++
++	list_for_each_entry(set_hash, &ip_set_hash[key], list)
++		if (set_hash->id == id && set_hash->ip == ip)
++			return set_hash;
++			
++	return NULL;
++}
++
++static ip_set_id_t
++ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
++{
++	u_int32_t key = jhash_2words(id, ip, ip_set_hash_random) 
++				% ip_set_bindings_hash_size;
++	struct ip_set_hash *set_hash;
++
++	ASSERT_READ_LOCK(&ip_set_lock);
++	IP_SET_ASSERT(ip_set_list[id]);
++	DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));	
++	
++	set_hash = __ip_set_find(key, id, ip);
++	
++	DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name, 
++	   HIPQUAD(ip),
++	   set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
++
++	return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
++}
++
++static inline void 
++__set_hash_del(struct ip_set_hash *set_hash)
++{
++	ASSERT_WRITE_LOCK(&ip_set_lock);
++	IP_SET_ASSERT(ip_set_list[set_hash->binding]);	
++
++	__ip_set_put(set_hash->binding);
++	list_del(&set_hash->list);
++	kfree(set_hash);
++}
++
++static int
++ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
++{
++	u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++				% ip_set_bindings_hash_size;
++	struct ip_set_hash *set_hash;
++	
++	IP_SET_ASSERT(ip_set_list[id]);
++	DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));	
++	write_lock_bh(&ip_set_lock);
++	set_hash = __ip_set_find(key, id, ip);
++	DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++	   HIPQUAD(ip),
++	   set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
++
++	if (set_hash != NULL)
++		__set_hash_del(set_hash);
++	write_unlock_bh(&ip_set_lock);
++	return 0;
++}
++
++static int 
++ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
++{
++	u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++				% ip_set_bindings_hash_size;
++	struct ip_set_hash *set_hash;
++	int ret = 0;
++	
++	IP_SET_ASSERT(ip_set_list[id]);
++	IP_SET_ASSERT(ip_set_list[binding]);
++	DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name, 
++	   HIPQUAD(ip), ip_set_list[binding]->name);
++	write_lock_bh(&ip_set_lock);
++	set_hash = __ip_set_find(key, id, ip);
++	if (!set_hash) {
++		set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
++		if (!set_hash) {
++			ret = -ENOMEM;
++			goto unlock;
++		}
++		INIT_LIST_HEAD(&set_hash->list);
++		set_hash->id = id;
++		set_hash->ip = ip;
++		list_add(&set_hash->list, &ip_set_hash[key]);
++	} else {
++		IP_SET_ASSERT(ip_set_list[set_hash->binding]);	
++		DP("overwrite binding: %s",
++		   ip_set_list[set_hash->binding]->name);
++		__ip_set_put(set_hash->binding);
++	}
++	set_hash->binding = binding;
++	__ip_set_get(set_hash->binding);
++	DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
++	   key, id, ip_set_list[id]->name,
++	   HIPQUAD(ip), binding, ip_set_list[binding]->name);
++    unlock:
++	write_unlock_bh(&ip_set_lock);
++	return ret;
++}
++
++#define FOREACH_HASH_DO(fn, args...) 						\
++({										\
++	ip_set_id_t __key;							\
++	struct ip_set_hash *__set_hash;						\
++										\
++	for (__key = 0; __key < ip_set_bindings_hash_size; __key++) {		\
++		list_for_each_entry(__set_hash, &ip_set_hash[__key], list)	\
++			fn(__set_hash , ## args);				\
++	}									\
++})
++
++#define FOREACH_HASH_RW_DO(fn, args...) 						\
++({										\
++	ip_set_id_t __key;							\
++	struct ip_set_hash *__set_hash, *__n;					\
++										\
++	ASSERT_WRITE_LOCK(&ip_set_lock);					\
++	for (__key = 0; __key < ip_set_bindings_hash_size; __key++) {		\
++		list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
++			fn(__set_hash , ## args);				\
++	}									\
++})
++
++/* Add, del and test set entries from kernel */
++
++#define follow_bindings(index, set, ip)					\
++((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID	\
++ || (index = (set)->binding) != IP_SET_INVALID_ID)
++
++int
++ip_set_testip_kernel(ip_set_id_t index,
++		     const struct sk_buff *skb,
++		     const u_int32_t *flags)
++{
++	struct ip_set *set;
++	ip_set_ip_t ip;
++	int res;
++	unsigned char i = 0;
++	
++	IP_SET_ASSERT(flags[i]);
++	read_lock_bh(&ip_set_lock);
++	do {
++		set = ip_set_list[index];
++		IP_SET_ASSERT(set);
++		DP("set %s, index %u", set->name, index);
++		read_lock_bh(&set->lock);
++		res = set->type->testip_kernel(set, skb, &ip, flags, i++);
++		read_unlock_bh(&set->lock);
++		i += !!(set->type->features & IPSET_DATA_DOUBLE);
++	} while (res > 0 
++		 && flags[i] 
++		 && follow_bindings(index, set, ip));
++	read_unlock_bh(&ip_set_lock);
++
++	return res;
++}
++
++void
++ip_set_addip_kernel(ip_set_id_t index,
++		    const struct sk_buff *skb,
++		    const u_int32_t *flags)
++{
++	struct ip_set *set;
++	ip_set_ip_t ip;
++	int res;
++	unsigned char i = 0;
++
++	IP_SET_ASSERT(flags[i]);
++   retry:
++	read_lock_bh(&ip_set_lock);
++	do {
++		set = ip_set_list[index];
++		IP_SET_ASSERT(set);
++		DP("set %s, index %u", set->name, index);
++		write_lock_bh(&set->lock);
++		res = set->type->addip_kernel(set, skb, &ip, flags, i++);
++		write_unlock_bh(&set->lock);
++		i += !!(set->type->features & IPSET_DATA_DOUBLE);
++	} while ((res == 0 || res == -EEXIST)
++		 && flags[i] 
++		 && follow_bindings(index, set, ip));
++	read_unlock_bh(&ip_set_lock);
++
++	if (res == -EAGAIN
++	    && set->type->retry
++	    && (res = set->type->retry(set)) == 0)
++	    	goto retry;
++}
++
++void
++ip_set_delip_kernel(ip_set_id_t index,
++		    const struct sk_buff *skb,
++		    const u_int32_t *flags)
++{
++	struct ip_set *set;
++	ip_set_ip_t ip;
++	int res;
++	unsigned char i = 0;
++
++	IP_SET_ASSERT(flags[i]);
++	read_lock_bh(&ip_set_lock);
++	do {
++		set = ip_set_list[index];
++		IP_SET_ASSERT(set);
++		DP("set %s, index %u", set->name, index);
++		write_lock_bh(&set->lock);
++		res = set->type->delip_kernel(set, skb, &ip, flags, i++);
++		write_unlock_bh(&set->lock);
++		i += !!(set->type->features & IPSET_DATA_DOUBLE);
++	} while ((res == 0 || res == -EEXIST)
++		 && flags[i] 
++		 && follow_bindings(index, set, ip));
++	read_unlock_bh(&ip_set_lock);
++}
++
++/* Register and deregister settype */
++
++static inline struct ip_set_type *
++find_set_type(const char *name)
++{
++	struct ip_set_type *set_type;
++
++	list_for_each_entry(set_type, &set_type_list, list)
++		if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
++			return set_type;
++	return NULL;
++}
++
++int 
++ip_set_register_set_type(struct ip_set_type *set_type)
++{
++	int ret = 0;
++	
++	if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
++		ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
++			      set_type->typename,
++			      set_type->protocol_version,
++			      IP_SET_PROTOCOL_VERSION);
++		return -EINVAL;
++	}
++
++	write_lock_bh(&ip_set_lock);
++	if (find_set_type(set_type->typename)) {
++		/* Duplicate! */
++		ip_set_printk("'%s' already registered!", 
++			      set_type->typename);
++		ret = -EINVAL;
++		goto unlock;
++	}
++	if (!try_module_get(THIS_MODULE)) {
++		ret = -EFAULT;
++		goto unlock;
++	}
++	list_add(&set_type->list, &set_type_list);
++	DP("'%s' registered.", set_type->typename);
++   unlock:
++	write_unlock_bh(&ip_set_lock);
++	return ret;
++}
++
++void
++ip_set_unregister_set_type(struct ip_set_type *set_type)
++{
++	write_lock_bh(&ip_set_lock);
++	if (!find_set_type(set_type->typename)) {
++		ip_set_printk("'%s' not registered?",
++			      set_type->typename);
++		goto unlock;
++	}
++	list_del(&set_type->list);
++	module_put(THIS_MODULE);
++	DP("'%s' unregistered.", set_type->typename);
++   unlock:
++	write_unlock_bh(&ip_set_lock);
++
++}
++
++/*
++ * Userspace routines
++ */
++
++/*
++ * Find set by name, reference it once. The reference makes sure the
++ * thing pointed to does not go away under our feet. Drop the reference
++ * later, using ip_set_put().
++ */
++ip_set_id_t
++ip_set_get_byname(const char *name)
++{
++	ip_set_id_t i, index = IP_SET_INVALID_ID;
++	
++	down(&ip_set_app_mutex);
++	for (i = 0; i < ip_set_max; i++) {
++		if (ip_set_list[i] != NULL
++		    && strcmp(ip_set_list[i]->name, name) == 0) {
++			__ip_set_get(i);
++			index = i;
++			break;
++		}
++	}
++	up(&ip_set_app_mutex);
++	return index;
++}
++
++/*
++ * Find set by index, reference it once. The reference makes sure the
++ * thing pointed to does not go away under our feet. Drop the reference
++ * later, using ip_set_put().
++ */
++ip_set_id_t
++ip_set_get_byindex(ip_set_id_t index)
++{
++	down(&ip_set_app_mutex);
++
++	if (index >= ip_set_max)
++		return IP_SET_INVALID_ID;
++	
++	if (ip_set_list[index])
++		__ip_set_get(index);
++	else
++		index = IP_SET_INVALID_ID;
++		
++	up(&ip_set_app_mutex);
++	return index;
++}
++
++/*
++ * If the given set pointer points to a valid set, decrement
++ * reference count by 1. The caller shall not assume the index
++ * to be valid, after calling this function.
++ */
++void ip_set_put(ip_set_id_t index)
++{
++	down(&ip_set_app_mutex);
++	if (ip_set_list[index])
++		__ip_set_put(index);
++	up(&ip_set_app_mutex);
++}
++
++/* Find a set by name or index */
++static ip_set_id_t
++ip_set_find_byname(const char *name)
++{
++	ip_set_id_t i, index = IP_SET_INVALID_ID;
++	
++	for (i = 0; i < ip_set_max; i++) {
++		if (ip_set_list[i] != NULL
++		    && strcmp(ip_set_list[i]->name, name) == 0) {
++			index = i;
++			break;
++		}
++	}
++	return index;
++}
++
++static ip_set_id_t
++ip_set_find_byindex(ip_set_id_t index)
++{
++	if (index >= ip_set_max || ip_set_list[index] == NULL)
++		index = IP_SET_INVALID_ID;
++	
++	return index;
++}
++
++/*
++ * Add, del, test, bind and unbind
++ */
++
++static inline int
++__ip_set_testip(struct ip_set *set,
++	        const void *data,
++	        size_t size,
++	        ip_set_ip_t *ip)
++{
++	int res;
++
++	read_lock_bh(&set->lock);
++	res = set->type->testip(set, data, size, ip);
++	read_unlock_bh(&set->lock);
++
++	return res;
++}
++
++static int
++__ip_set_addip(ip_set_id_t index,
++	       const void *data,
++	       size_t size)
++{
++	struct ip_set *set = ip_set_list[index];
++	ip_set_ip_t ip;
++	int res;
++	
++	IP_SET_ASSERT(set);
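++	/* Retry while the set type reports -EAGAIN and its retry() hook (typically a rehash) succeeds */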
++	do {
++		write_lock_bh(&set->lock);
++		res = set->type->addip(set, data, size, &ip);
++		write_unlock_bh(&set->lock);
++	} while (res == -EAGAIN
++		 && set->type->retry
++		 && (res = set->type->retry(set)) == 0);
++
++	return res;
++}
++
++static int
++ip_set_addip(ip_set_id_t index,
++	     const void *data,
++	     size_t size)
++{
++
++	return __ip_set_addip(index,
++			      data + sizeof(struct ip_set_req_adt),
++			      size - sizeof(struct ip_set_req_adt));
++}
++
++static int
++ip_set_delip(ip_set_id_t index,
++	     const void *data,
++	     size_t size)
++{
++	struct ip_set *set = ip_set_list[index];
++	ip_set_ip_t ip;
++	int res;
++	
++	IP_SET_ASSERT(set);
++	write_lock_bh(&set->lock);
++	res = set->type->delip(set,
++			       data + sizeof(struct ip_set_req_adt),
++			       size - sizeof(struct ip_set_req_adt),
++			       &ip);
++	write_unlock_bh(&set->lock);
++
++	return res;
++}
++
++static int
++ip_set_testip(ip_set_id_t index,
++	      const void *data,
++	      size_t size)
++{
++	struct ip_set *set = ip_set_list[index];
++	ip_set_ip_t ip;
++	int res;
++
++	IP_SET_ASSERT(set);
++	res = __ip_set_testip(set,
++			      data + sizeof(struct ip_set_req_adt),
++			      size - sizeof(struct ip_set_req_adt),
++			      &ip);
++
++	return (res > 0 ? -EEXIST : res);
++}
++
++static int
++ip_set_bindip(ip_set_id_t index,
++	      const void *data,
++	      size_t size)
++{
++	struct ip_set *set = ip_set_list[index];
++	struct ip_set_req_bind *req_bind;
++	ip_set_id_t binding;
++	ip_set_ip_t ip;
++	int res;
++
++	IP_SET_ASSERT(set);
++	if (size < sizeof(struct ip_set_req_bind))
++		return -EINVAL;
++		
++	req_bind = (struct ip_set_req_bind *) data;
++	req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++
++	if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++		/* Default binding of a set */
++		char *binding_name;
++		
++		if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
++			return -EINVAL;
++
++		binding_name = (char *)(data + sizeof(struct ip_set_req_bind));	
++		binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++
++		binding = ip_set_find_byname(binding_name);
++		if (binding == IP_SET_INVALID_ID)
++			return -ENOENT;
++
++		write_lock_bh(&ip_set_lock);
++		/* Sets as binding values are referenced */
++		if (set->binding != IP_SET_INVALID_ID)
++			__ip_set_put(set->binding);
++		set->binding = binding;
++		__ip_set_get(set->binding);
++		write_unlock_bh(&ip_set_lock);
++
++		return 0;
++	}
++	binding = ip_set_find_byname(req_bind->binding);
++	if (binding == IP_SET_INVALID_ID)
++		return -ENOENT;
++
++	res = __ip_set_testip(set,
++			      data + sizeof(struct ip_set_req_bind),
++			      size - sizeof(struct ip_set_req_bind),
++			      &ip);
++	DP("set %s, ip: %u.%u.%u.%u, binding %s",
++	   set->name, HIPQUAD(ip), ip_set_list[binding]->name);
++	
++	if (res >= 0)
++		res = ip_set_hash_add(set->id, ip, binding);
++
++	return res;
++}
++
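++/* Apply fn(set, args...) to every set currently defined in ip_set_list */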
++#define FOREACH_SET_DO(fn, args...) 				\
++({								\
++	ip_set_id_t __i;					\
++	struct ip_set *__set;					\
++								\
++	for (__i = 0; __i < ip_set_max; __i++) {		\
++		__set = ip_set_list[__i];			\
++		if (__set != NULL)				\
++			fn(__set , ##args);			\
++	}							\
++})
++
++static inline void
++__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
++{
++	if (set_hash->id == id)
++		__set_hash_del(set_hash);
++}
++
++static inline void
++__unbind_default(struct ip_set *set)
++{
++	if (set->binding != IP_SET_INVALID_ID) {
++		/* Sets as binding values are referenced */
++		__ip_set_put(set->binding);
++		set->binding = IP_SET_INVALID_ID;
++	}
++}
++
++static int
++ip_set_unbindip(ip_set_id_t index,
++	        const void *data,
++	        size_t size)
++{
++	struct ip_set *set;
++	struct ip_set_req_bind *req_bind;
++	ip_set_ip_t ip;
++	int res;
++
++	DP("");
++	if (size < sizeof(struct ip_set_req_bind))
++		return -EINVAL;
++		
++	req_bind = (struct ip_set_req_bind *) data;
++	req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++	
++	DP("%u %s", index, req_bind->binding);
++	if (index == IP_SET_INVALID_ID) {
++		/* unbind :all: */
++		if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++			/* Default binding of sets */
++			write_lock_bh(&ip_set_lock);
++			FOREACH_SET_DO(__unbind_default);
++			write_unlock_bh(&ip_set_lock);
++			return 0;
++		} else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++			/* Flush all bindings of all sets */
++			write_lock_bh(&ip_set_lock);
++			FOREACH_HASH_RW_DO(__set_hash_del);
++			write_unlock_bh(&ip_set_lock);
++			return 0;
++		}
++		DP("unreachable reached!");
++		return -EINVAL;
++	}
++	
++	set = ip_set_list[index];
++	IP_SET_ASSERT(set);
++	if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++		/* Default binding of set */
++		ip_set_id_t binding = ip_set_find_byindex(set->binding);
++
++		if (binding == IP_SET_INVALID_ID)
++			return -ENOENT;
++			
++		write_lock_bh(&ip_set_lock);
++		/* Sets as binding values are referenced */
++		__ip_set_put(set->binding);
++		set->binding = IP_SET_INVALID_ID;
++		write_unlock_bh(&ip_set_lock);
++
++		return 0;
++	} else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++		/* Flush all bindings */
++
++		write_lock_bh(&ip_set_lock);
++		FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
++		write_unlock_bh(&ip_set_lock);
++		return 0;
++	}
++	
++	res = __ip_set_testip(set,
++			      data + sizeof(struct ip_set_req_bind),
++			      size - sizeof(struct ip_set_req_bind),
++			      &ip);
++
++	DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
++	if (res >= 0)
++		res = ip_set_hash_del(set->id, ip);
++
++	return res;
++}
++
++static int
++ip_set_testbind(ip_set_id_t index,
++	        const void *data,
++	        size_t size)
++{
++	struct ip_set *set = ip_set_list[index];
++	struct ip_set_req_bind *req_bind;
++	ip_set_id_t binding;
++	ip_set_ip_t ip;
++	int res;
++
++	IP_SET_ASSERT(set);
++	if (size < sizeof(struct ip_set_req_bind))
++		return -EINVAL;
++		
++	req_bind = (struct ip_set_req_bind *) data;
++	req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++
++	if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++		/* Default binding of set */
++		char *binding_name;
++		
++		if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
++			return -EINVAL;
++
++		binding_name = (char *)(data + sizeof(struct ip_set_req_bind));	
++		binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++
++		binding = ip_set_find_byname(binding_name);
++		if (binding == IP_SET_INVALID_ID)
++			return -ENOENT;
++		
++		res = (set->binding == binding) ? -EEXIST : 0;
++
++		return res;
++	}
++	binding = ip_set_find_byname(req_bind->binding);
++	if (binding == IP_SET_INVALID_ID)
++		return -ENOENT;
++		
++	
++	res = __ip_set_testip(set,
++			      data + sizeof(struct ip_set_req_bind),
++			      size - sizeof(struct ip_set_req_bind),
++			      &ip);
++	DP("set %s, ip: %u.%u.%u.%u, binding %s",
++	   set->name, HIPQUAD(ip), ip_set_list[binding]->name);
++	   
++	if (res >= 0)
++		res = (ip_set_find_in_hash(set->id, ip) == binding)
++			? -EEXIST : 0;
++
++	return res;
++}
++
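++/* Look up a set type by name; on success ip_set_lock is left read-locked for the caller to release */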
++static struct ip_set_type *
++find_set_type_rlock(const char *typename)
++{
++	struct ip_set_type *type;
++	
++	read_lock_bh(&ip_set_lock);
++	type = find_set_type(typename);
++	if (type == NULL)
++		read_unlock_bh(&ip_set_lock);
++
++	return type;
++}
++
++static int
++find_free_id(const char *name,
++	     ip_set_id_t *index,
++	     ip_set_id_t *id)
++{
++	ip_set_id_t i;
++
++	*id = IP_SET_INVALID_ID;
++	for (i = 0;  i < ip_set_max; i++) {
++		if (ip_set_list[i] == NULL) {
++			if (*id == IP_SET_INVALID_ID)
++				*id = *index = i;
++		} else if (strcmp(name, ip_set_list[i]->name) == 0)
++			/* Name clash */
++			return -EEXIST;
++	}
++	if (*id == IP_SET_INVALID_ID)
++		/* No free slot remained */
++		return -ERANGE;
++	/* Check that index is usable as id (swapping) */
++    check:	
++	for (i = 0;  i < ip_set_max; i++) {
++		if (ip_set_list[i] != NULL
++		    && ip_set_list[i]->id == *id) {
++		    *id = i;
++		    goto check;
++		}
++	}
++	return 0;
++}
++
++/*
++ * Create a set
++ */
++static int
++ip_set_create(const char *name,
++	      const char *typename,
++	      ip_set_id_t restore,
++	      const void *data,
++	      size_t size)
++{
++	struct ip_set *set;
++	ip_set_id_t index = 0, id;
++	int res = 0;
++
++	DP("setname: %s, typename: %s, id: %u", name, typename, restore);
++	/*
++	 * First, and without any locks, allocate and initialize
++	 * a normal base set structure.
++	 */
++	set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
++	if (!set)
++		return -ENOMEM;
++	set->lock = RW_LOCK_UNLOCKED;
++	strncpy(set->name, name, IP_SET_MAXNAMELEN);
++	set->binding = IP_SET_INVALID_ID;
++	atomic_set(&set->ref, 0);
++
++	/*
++	 * Next, take the &ip_set_lock, check that we know the type,
++	 * and take a reference on the type, to make sure it
++	 * stays available while constructing our new set.
++	 *
++	 * After referencing the type, we drop the &ip_set_lock,
++	 * and let the new set construction run without locks.
++	 */
++	set->type = find_set_type_rlock(typename);
++	if (set->type == NULL) {
++		/* Try loading the module */
++		char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
++		strcpy(modulename, "ip_set_");
++		strcat(modulename, typename);
++		DP("try to load %s", modulename);
++		request_module(modulename);
++		set->type = find_set_type_rlock(typename);
++	}
++	if (set->type == NULL) {
++		ip_set_printk("no set type '%s', set '%s' not created",
++			      typename, name);
++		res = -ENOENT;
++		goto out;
++	}
++	if (!try_module_get(set->type->me)) {
++		read_unlock_bh(&ip_set_lock);
++		res = -EFAULT;
++		goto out;
++	}
++	read_unlock_bh(&ip_set_lock);
++
++	/*
++	 * Without holding any locks, create private part.
++	 */
++	res = set->type->create(set, data, size);
++	if (res != 0)
++		goto put_out;
++
++	/* BTW, res==0 here. */
++
++	/*
++	 * Here, we have a valid, constructed set. &ip_set_lock again,
++	 * find free id/index and check that it is not already in 
++	 * ip_set_list.
++	 */
++	write_lock_bh(&ip_set_lock);
++	if ((res = find_free_id(set->name, &index, &id)) != 0) {
++		DP("no free id!");
++		goto cleanup;
++	}
++
++	/* Make sure restore gets the same index */
++	if (restore != IP_SET_INVALID_ID && index != restore) {
++		DP("Can't restore, sets are screwed up");
++		res = -ERANGE;
++		goto cleanup;
++	}
++	 
++	/*
++	 * Finally! Add our shiny new set to the list, and be done.
++	 */
++	DP("create: '%s' created with index %u, id %u!", set->name, index, id);
++	set->id = id;
++	ip_set_list[index] = set;
++	write_unlock_bh(&ip_set_lock);
++	return res;
++	
++    cleanup:
++	write_unlock_bh(&ip_set_lock);
++	set->type->destroy(set);
++    put_out:
++	module_put(set->type->me);
++    out:
++	kfree(set);
++	return res;
++}
++
++/*
++ * Destroy a given existing set
++ */
++static void
++ip_set_destroy_set(ip_set_id_t index)
++{
++	struct ip_set *set = ip_set_list[index];
++
++	IP_SET_ASSERT(set);
++	DP("set: %s",  set->name);
++	write_lock_bh(&ip_set_lock);
++	FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
++	if (set->binding != IP_SET_INVALID_ID)
++		__ip_set_put(set->binding);
++	ip_set_list[index] = NULL;
++	write_unlock_bh(&ip_set_lock);
++
++	/* Must call it without holding any lock */
++	set->type->destroy(set);
++	module_put(set->type->me);
++	kfree(set);
++}
++
++/*
++ * Destroy a set - or all sets
++ * Sets must not be referenced/used.
++ */
++static int
++ip_set_destroy(ip_set_id_t index)
++{
++	ip_set_id_t i;
++
++	/* ref modification always protected by the mutex */
++	if (index != IP_SET_INVALID_ID) {
++		if (atomic_read(&ip_set_list[index]->ref))
++			return -EBUSY;
++		ip_set_destroy_set(index);
++	} else {
++		for (i = 0; i < ip_set_max; i++) {
++			if (ip_set_list[i] != NULL 
++			    && (atomic_read(&ip_set_list[i]->ref)))
++			    	return -EBUSY;
++		}
++
++		for (i = 0; i < ip_set_max; i++) {
++			if (ip_set_list[i] != NULL)
++				ip_set_destroy_set(i);
++		}
++	}
++	return 0;
++}
++
++static void
++ip_set_flush_set(struct ip_set *set)
++{
++	DP("set: %s %u",  set->name, set->id);
++
++	write_lock_bh(&set->lock);
++	set->type->flush(set);
++	write_unlock_bh(&set->lock);
++}
++
++/* 
++ * Flush data in a set - or in all sets
++ */
++static int
++ip_set_flush(ip_set_id_t index)
++{
++	if (index != IP_SET_INVALID_ID) {
++		IP_SET_ASSERT(ip_set_list[index]);
++		ip_set_flush_set(ip_set_list[index]);
++	} else
++		FOREACH_SET_DO(ip_set_flush_set);
++
++	return 0;
++}
++
++/* Rename a set */
++static int
++ip_set_rename(ip_set_id_t index, const char *name)
++{
++	struct ip_set *set = ip_set_list[index];
++	ip_set_id_t i;
++	int res = 0;
++
++	DP("set: %s to %s",  set->name, name);
++	write_lock_bh(&ip_set_lock);
++	for (i = 0; i < ip_set_max; i++) {
++		if (ip_set_list[i] != NULL
++		    && strncmp(ip_set_list[i]->name, 
++			       name,
++			       IP_SET_MAXNAMELEN - 1) == 0) {
++			res = -EEXIST;
++			goto unlock;
++		}
++	}
++	strncpy(set->name, name, IP_SET_MAXNAMELEN);
++    unlock:
++	write_unlock_bh(&ip_set_lock);
++	return res;
++}
++
++/*
++ * Swap two sets so that name/index points to the other.
++ * References are also swapped.
++ */
++static int
++ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
++{
++	struct ip_set *from = ip_set_list[from_index];
++	struct ip_set *to = ip_set_list[to_index];
++	char from_name[IP_SET_MAXNAMELEN];
++	u_int32_t from_ref;
++
++	DP("set: %s to %s",  from->name, to->name);
++	/* Features must not change. Artificial restriction. */
++	if (from->type->features != to->type->features)
++		return -ENOEXEC;
++
++	/* No magic here: ref munging protected by the mutex */	
++	write_lock_bh(&ip_set_lock);
++	strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
++	from_ref = atomic_read(&from->ref);
++
++	strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
++	atomic_set(&from->ref, atomic_read(&to->ref));
++	strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
++	atomic_set(&to->ref, from_ref);
++	
++	ip_set_list[from_index] = to;
++	ip_set_list[to_index] = from;
++	
++	write_unlock_bh(&ip_set_lock);
++	return 0;
++}
++
++/*
++ * List set data
++ */
++
++static inline void
++__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
++			      ip_set_id_t id, size_t *size)
++{
++	if (set_hash->id == id)
++		*size += sizeof(struct ip_set_hash_list);
++}
++
++static inline void
++__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
++			      ip_set_id_t id, size_t *size)
++{
++	if (set_hash->id == id)
++		*size += sizeof(struct ip_set_hash_save);
++}
++
++static inline void
++__set_hash_bindings(struct ip_set_hash *set_hash,
++		    ip_set_id_t id, void *data, int *used)
++{
++	if (set_hash->id == id) {
++		struct ip_set_hash_list *hash_list = 
++			(struct ip_set_hash_list *)(data + *used);
++
++		hash_list->ip = set_hash->ip;
++		hash_list->binding = set_hash->binding;
++		*used += sizeof(struct ip_set_hash_list);
++	}
++}
++
++static int ip_set_list_set(ip_set_id_t index,
++			   void *data,
++			   int *used,
++			   int len)
++{
++	struct ip_set *set = ip_set_list[index];
++	struct ip_set_list *set_list;
++
++	/* Pointer to our header */
++	set_list = (struct ip_set_list *) (data + *used);
++
++	DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
++
++	/* Get and ensure header size */
++	if (*used + sizeof(struct ip_set_list) > len)
++		goto not_enough_mem;
++	*used += sizeof(struct ip_set_list);
++
++	read_lock_bh(&set->lock);
++	/* Get and ensure set specific header size */
++	set_list->header_size = set->type->header_size;
++	if (*used + set_list->header_size > len)
++		goto unlock_set;
++
++	/* Fill in the header */
++	set_list->index = index;
++	set_list->binding = set->binding;
++	set_list->ref = atomic_read(&set->ref);
++
++	/* Fill in set specific header data */
++	set->type->list_header(set, data + *used);
++	*used += set_list->header_size;
++
++	/* Get and ensure set specific members size */
++	set_list->members_size = set->type->list_members_size(set);
++	if (*used + set_list->members_size > len)
++		goto unlock_set;
++
++	/* Fill in set specific members data */
++	set->type->list_members(set, data + *used);
++	*used += set_list->members_size;
++	read_unlock_bh(&set->lock);
++
++	/* Bindings */
++
++	/* Get and ensure set specific bindings size */
++	set_list->bindings_size = 0;
++	FOREACH_HASH_DO(__set_hash_bindings_size_list,
++			set->id, &set_list->bindings_size);
++	if (*used + set_list->bindings_size > len)
++		goto not_enough_mem;
++
++	/* Fill in set specific bindings data */
++	FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
++	
++	return 0;
++
++    unlock_set:
++	read_unlock_bh(&set->lock);
++    not_enough_mem:
++	DP("not enough mem, try again");
++	return -EAGAIN;
++}
++
++/*
++ * Save sets
++ */
++static int ip_set_save_set(ip_set_id_t index,
++			   void *data,
++			   int *used,
++			   int len)
++{
++	struct ip_set *set;
++	struct ip_set_save *set_save;
++
++	/* Pointer to our header */
++	set_save = (struct ip_set_save *) (data + *used);
++
++	/* Get and ensure header size */
++	if (*used + sizeof(struct ip_set_save) > len)
++		goto not_enough_mem;
++	*used += sizeof(struct ip_set_save);
++
++	set = ip_set_list[index];
++	DP("set: %s, used: %u(%u) %p %p", set->name, *used, len, 
++	   data, data + *used);
++
++	read_lock_bh(&set->lock);
++	/* Get and ensure set specific header size */
++	set_save->header_size = set->type->header_size;
++	if (*used + set_save->header_size > len)
++		goto unlock_set;
++
++	/* Fill in the header */
++	set_save->index = index;
++	set_save->binding = set->binding;
++
++	/* Fill in set specific header data */
++	set->type->list_header(set, data + *used);
++	*used += set_save->header_size;
++
++	DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
++	   set_save->header_size, data, data + *used);
++	/* Get and ensure set specific members size */
++	set_save->members_size = set->type->list_members_size(set);
++	if (*used + set_save->members_size > len)
++		goto unlock_set;
++
++	/* Fill in set specific members data */
++	set->type->list_members(set, data + *used);
++	*used += set_save->members_size;
++	read_unlock_bh(&set->lock);
++	DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
++	   set_save->members_size, data, data + *used);
++	return 0;
++
++    unlock_set:
++	read_unlock_bh(&set->lock);
++    not_enough_mem:
++	DP("not enough mem, try again");
++	return -EAGAIN;
++}
++
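++/* Append one binding record to the save buffer when it belongs to the given set id (or to any set if id is IP_SET_INVALID_ID) */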
++static inline void
++__set_hash_save_bindings(struct ip_set_hash *set_hash,
++			 ip_set_id_t id,
++			 void *data,
++			 int *used,
++			 int len,
++			 int *res)
++{
++	if (*res == 0
++	    && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
++		struct ip_set_hash_save *hash_save = 
++			(struct ip_set_hash_save *)(data + *used);
++		/* Ensure bindings size */
++		if (*used + sizeof(struct ip_set_hash_save) > len) {
++			*res = -ENOMEM;
++			return;
++		}
++		hash_save->id = set_hash->id;
++		hash_save->ip = set_hash->ip;
++		hash_save->binding = set_hash->binding;
++		*used += sizeof(struct ip_set_hash_save);
++	}
++}
++
++static int ip_set_save_bindings(ip_set_id_t index,
++			   	void *data,
++			   	int *used,
++			   	int len)
++{
++	int res = 0;
++	struct ip_set_save *set_save;
++
++	DP("used %u, len %u", *used, len);
++	/* Get and ensure header size */
++	if (*used + sizeof(struct ip_set_save) > len)
++		return -ENOMEM;
++
++	/* Marker: an entry with an invalid index separates set records from binding records */
++	set_save = (struct ip_set_save *) (data + *used);
++	set_save->index = IP_SET_INVALID_ID;
++	set_save->header_size = 0;
++	set_save->members_size = 0;
++	*used += sizeof(struct ip_set_save);
++
++	DP("marker added used %u, len %u", *used, len);
++	/* Fill in bindings data */
++	if (index != IP_SET_INVALID_ID)
++		/* Sets are identified by id in hash */
++		index = ip_set_list[index]->id;
++	FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
++
++	return res;	
++}
++
++/*
++ * Restore sets
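++ * Returns 0 on success, otherwise the input line number at which restoring failed.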
++ */
++static int ip_set_restore(void *data,
++			  int len)
++{
++	int res = 0;
++	int line = 0, used = 0, members_size;
++	struct ip_set *set;
++	struct ip_set_hash_save *hash_save;
++	struct ip_set_restore *set_restore;
++	ip_set_id_t index;
++
++	/* Loop to restore sets */
++	while (1) {
++		line++;
++		
++		DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
++		/* Get and ensure header size */
++		if (used + sizeof(struct ip_set_restore) > len)
++			return line;
++		set_restore = (struct ip_set_restore *) (data + used);
++		used += sizeof(struct ip_set_restore);
++
++		/* Ensure data size */
++		if (used 
++		    + set_restore->header_size 
++		    + set_restore->members_size > len)
++			return line;
++
++		/* Check marker */
++		if (set_restore->index == IP_SET_INVALID_ID) {
++			line--;
++			goto bindings;
++		}
++		
++		/* Try to create the set */
++		DP("restore %s %s", set_restore->name, set_restore->typename);
++		res = ip_set_create(set_restore->name,
++				    set_restore->typename,
++				    set_restore->index,
++				    data + used,
++				    set_restore->header_size);
++		
++		if (res != 0)
++			return line;
++		used += set_restore->header_size;
++
++		index = ip_set_find_byindex(set_restore->index);
++		DP("index %u, restore_index %u", index, set_restore->index);
++		if (index != set_restore->index)
++			return line;
++		/* Try to restore members data */
++		set = ip_set_list[index];
++		members_size = 0;
++		DP("members_size %u reqsize %u",
++		   set_restore->members_size, set->type->reqsize);
++		while (members_size + set->type->reqsize <=
++		       set_restore->members_size) {
++			line++;
++		       	DP("members: %u, line %u", members_size, line);
++			res = __ip_set_addip(index,
++					   data + used + members_size,
++					   set->type->reqsize);
++			if (!(res == 0 || res == -EEXIST)) 
++				return line;
++			members_size += set->type->reqsize;
++		}
++
++		DP("members_size %u  %u",
++		   set_restore->members_size, members_size);
++		if (members_size != set_restore->members_size)
++			return line++;
++		used += set_restore->members_size;		
++	}
++	
++   bindings:
++   	/* Loop to restore bindings */
++   	while (used < len) {
++		line++;
++
++		DP("restore binding, line %u", line);		
++		/* Get and ensure size */
++		if (used + sizeof(struct ip_set_hash_save) > len)
++			return line;
++		hash_save = (struct ip_set_hash_save *) (data + used);
++		used += sizeof(struct ip_set_hash_save);
++		
++		/* hash_save->id is used to store the index */
++		index = ip_set_find_byindex(hash_save->id);
++		DP("restore binding index %u, id %u, %u -> %u",
++		   index, hash_save->id, hash_save->ip, hash_save->binding);		
++		if (index != hash_save->id)
++			return line;
++		if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
++			DP("corrupt binding set index %u", hash_save->binding);
++			return line;
++		}
++		set = ip_set_list[hash_save->id];
++		/* Null valued IP means default binding */
++		if (hash_save->ip)
++			res = ip_set_hash_add(set->id, 
++					      hash_save->ip,
++					      hash_save->binding);
++		else {
++			IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
++			write_lock_bh(&ip_set_lock);
++			set->binding = hash_save->binding;
++			__ip_set_get(set->binding);
++			write_unlock_bh(&ip_set_lock);
++			DP("default binding: %u", set->binding);
++		}
++		if (res != 0)
++			return line;
++   	}
++   	if (used != len)
++   		return line;
++   	
++	return 0;	
++}
++
++static int
++ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
++{
++	void *data;
++	int res = 0;		/* Assume OK */
++	unsigned *op;
++	struct ip_set_req_adt *req_adt;
++	ip_set_id_t index = IP_SET_INVALID_ID;
++	int (*adtfn)(ip_set_id_t index,
++		     const void *data, size_t size);
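++	/* Dispatch table; entries must stay in the order of the IP_SET_OP_ADD_IP..IP_SET_OP_TEST_BIND_SET op codes */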
++	struct fn_table {
++		int (*fn)(ip_set_id_t index,
++			  const void *data, size_t size);
++	} adtfn_table[] =
++	{ { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
++	  { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
++	};
++
++	DP("optval=%d, user=%p, len=%d", optval, user, len);
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
++	if (optval != SO_IP_SET)
++		return -EBADF;
++	if (len <= sizeof(unsigned)) {
++		ip_set_printk("short userdata (want >%zu, got %u)",
++			      sizeof(unsigned), len);
++		return -EINVAL;
++	}
++	data = vmalloc(len);
++	if (!data) {
++		DP("out of mem for %u bytes", len);
++		return -ENOMEM;
++	}
++	if (copy_from_user(data, user, len) != 0) {
++		res = -EFAULT;
++		goto done;
++	}
++	if (down_interruptible(&ip_set_app_mutex)) {
++		res = -EINTR;
++		goto done;
++	}
++
++	op = (unsigned *)data;
++	DP("op=%x", *op);
++	
++	if (*op < IP_SET_OP_VERSION) {
++		/* Check the version at the beginning of operations */
++		struct ip_set_req_version *req_version =
++			(struct ip_set_req_version *) data;
++		if (req_version->version != IP_SET_PROTOCOL_VERSION) {
++			res = -EPROTO;
++			goto done;
++		}
++	}
++
++	switch (*op) {
++	case IP_SET_OP_CREATE:{
++		struct ip_set_req_create *req_create
++			= (struct ip_set_req_create *) data;
++		
++		if (len < sizeof(struct ip_set_req_create)) {
++			ip_set_printk("short CREATE data (want >=%zu, got %u)",
++				      sizeof(struct ip_set_req_create), len);
++			res = -EINVAL;
++			goto done;
++		}
++		req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
++		req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++		res = ip_set_create(req_create->name,
++				    req_create->typename,
++				    IP_SET_INVALID_ID,
++				    data + sizeof(struct ip_set_req_create),
++				    len - sizeof(struct ip_set_req_create));
++		goto done;
++	}
++	case IP_SET_OP_DESTROY:{
++		struct ip_set_req_std *req_destroy
++			= (struct ip_set_req_std *) data;
++		
++		if (len != sizeof(struct ip_set_req_std)) {
++			ip_set_printk("invalid DESTROY data (want %zu, got %u)",
++				      sizeof(struct ip_set_req_std), len);
++			res = -EINVAL;
++			goto done;
++		}
++		if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
++			/* Destroy all sets */
++			index = IP_SET_INVALID_ID;
++		} else {
++			req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
++			index = ip_set_find_byname(req_destroy->name);
++
++			if (index == IP_SET_INVALID_ID) {
++				res = -ENOENT;
++				goto done;
++			}
++		}
++			
++		res = ip_set_destroy(index);
++		goto done;
++	}
++	case IP_SET_OP_FLUSH:{
++		struct ip_set_req_std *req_flush =
++			(struct ip_set_req_std *) data;
++
++		if (len != sizeof(struct ip_set_req_std)) {
++			ip_set_printk("invalid FLUSH data (want %zu, got %u)",
++				      sizeof(struct ip_set_req_std), len);
++			res = -EINVAL;
++			goto done;
++		}
++		if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
++			/* Flush all sets */
++			index = IP_SET_INVALID_ID;
++		} else {
++			req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
++			index = ip_set_find_byname(req_flush->name);
++
++			if (index == IP_SET_INVALID_ID) {
++				res = -ENOENT;
++				goto done;
++			}
++		}
++		res = ip_set_flush(index);
++		goto done;
++	}
++	case IP_SET_OP_RENAME:{
++		struct ip_set_req_create *req_rename
++			= (struct ip_set_req_create *) data;
++
++		if (len != sizeof(struct ip_set_req_create)) {
++			ip_set_printk("invalid RENAME data (want %zu, got %u)",
++				      sizeof(struct ip_set_req_create), len);
++			res = -EINVAL;
++			goto done;
++		}
++
++		req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
++		req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++			
++		index = ip_set_find_byname(req_rename->name);
++		if (index == IP_SET_INVALID_ID) {
++			res = -ENOENT;
++			goto done;
++		}
++		res = ip_set_rename(index, req_rename->typename);
++		goto done;
++	}
++	case IP_SET_OP_SWAP:{
++		struct ip_set_req_create *req_swap
++			= (struct ip_set_req_create *) data;
++		ip_set_id_t to_index;
++
++		if (len != sizeof(struct ip_set_req_create)) {
++			ip_set_printk("invalid SWAP data (want %zu, got %u)",
++				      sizeof(struct ip_set_req_create), len);
++			res = -EINVAL;
++			goto done;
++		}
++
++		req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
++		req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++
++		index = ip_set_find_byname(req_swap->name);
++		if (index == IP_SET_INVALID_ID) {
++			res = -ENOENT;
++			goto done;
++		}
++		to_index = ip_set_find_byname(req_swap->typename);
++		if (to_index == IP_SET_INVALID_ID) {
++			res = -ENOENT;
++			goto done;
++		}
++		res = ip_set_swap(index, to_index);
++		goto done;
++	}
++	default: 
++		break;	/* Set identified by id */
++	}
++	
++	/* From here on, only add/del/test/bind/unbind/test_bind operations remain */
++	if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
++		res = -EBADMSG;
++		goto done;
++	}
++	adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
++
++	if (len < sizeof(struct ip_set_req_adt)) {
++		ip_set_printk("short data in adt request (want >=%zu, got %u)",
++			      sizeof(struct ip_set_req_adt), len);
++		res = -EINVAL;
++		goto done;
++	}
++	req_adt = (struct ip_set_req_adt *) data;
++
++	/* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
++	if (!(*op == IP_SET_OP_UNBIND_SET 
++	      && req_adt->index == IP_SET_INVALID_ID)) {
++		index = ip_set_find_byindex(req_adt->index);
++		if (index == IP_SET_INVALID_ID) {
++			res = -ENOENT;
++			goto done;
++		}
++	}
++	res = adtfn(index, data, len);
++
++    done:
++	up(&ip_set_app_mutex);
++	vfree(data);
++	if (res > 0)
++		res = 0;
++	DP("final result %d", res);
++	return res;
++}
++
++static int 
++ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
++{
++	int res = 0;
++	unsigned *op;
++	ip_set_id_t index = IP_SET_INVALID_ID;
++	void *data;
++	int copylen = *len;
++
++	DP("optval=%d, user=%p, len=%d", optval, user, *len);
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
++	if (optval != SO_IP_SET)
++		return -EBADF;
++	if (*len < sizeof(unsigned)) {
++		ip_set_printk("short userdata (want >=%zu, got %d)",
++			      sizeof(unsigned), *len);
++		return -EINVAL;
++	}
++	data = vmalloc(*len);
++	if (!data) {
++		DP("out of mem for %d bytes", *len);
++		return -ENOMEM;
++	}
++	if (copy_from_user(data, user, *len) != 0) {
++		res = -EFAULT;
++		goto done;
++	}
++	if (down_interruptible(&ip_set_app_mutex)) {
++		res = -EINTR;
++		goto done;
++	}
++
++	op = (unsigned *) data;
++	DP("op=%x", *op);
++
++	if (*op < IP_SET_OP_VERSION) {
++		/* Check the version at the beginning of operations */
++		struct ip_set_req_version *req_version =
++			(struct ip_set_req_version *) data;
++		if (req_version->version != IP_SET_PROTOCOL_VERSION) {
++			res = -EPROTO;
++			goto done;
++		}
++	}
++
++	switch (*op) {
++	case IP_SET_OP_VERSION: {
++		struct ip_set_req_version *req_version =
++		    (struct ip_set_req_version *) data;
++
++		if (*len != sizeof(struct ip_set_req_version)) {
++			ip_set_printk("invalid VERSION (want %zu, got %d)",
++				      sizeof(struct ip_set_req_version),
++				      *len);
++			res = -EINVAL;
++			goto done;
++		}
++
++		req_version->version = IP_SET_PROTOCOL_VERSION;
++		res = copy_to_user(user, req_version,
++				   sizeof(struct ip_set_req_version));
++		goto done;
++	}
++	case IP_SET_OP_GET_BYNAME: {
++		struct ip_set_req_get_set *req_get
++			= (struct ip_set_req_get_set *) data;
++
++		if (*len != sizeof(struct ip_set_req_get_set)) {
++			ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
++				      sizeof(struct ip_set_req_get_set), *len);
++			res = -EINVAL;
++			goto done;
++		}
++		req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++		index = ip_set_find_byname(req_get->set.name);
++		req_get->set.index = index;
++		goto copy;
++	}
++	case IP_SET_OP_GET_BYINDEX: {
++		struct ip_set_req_get_set *req_get
++			= (struct ip_set_req_get_set *) data;
++
++		if (*len != sizeof(struct ip_set_req_get_set)) {
++			ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
++				      sizeof(struct ip_set_req_get_set), *len);
++			res = -EINVAL;
++			goto done;
++		}
++		req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++		index = ip_set_find_byindex(req_get->set.index);
++		strncpy(req_get->set.name,
++			index == IP_SET_INVALID_ID ? ""
++			: ip_set_list[index]->name, IP_SET_MAXNAMELEN);
++		goto copy;
++	}
++	case IP_SET_OP_ADT_GET: {
++		struct ip_set_req_adt_get *req_get
++			= (struct ip_set_req_adt_get *) data;
++
++		if (*len != sizeof(struct ip_set_req_adt_get)) {
++			ip_set_printk("invalid ADT_GET (want %zu, got %d)",
++				      sizeof(struct ip_set_req_adt_get), *len);
++			res = -EINVAL;
++			goto done;
++		}
++		req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++		index = ip_set_find_byname(req_get->set.name);
++		if (index != IP_SET_INVALID_ID) {
++			req_get->set.index = index;
++			strncpy(req_get->typename,
++				ip_set_list[index]->type->typename,
++				IP_SET_MAXNAMELEN - 1);
++		} else {
++			res = -ENOENT;
++			goto done;
++		}
++		goto copy;
++	}
++	case IP_SET_OP_MAX_SETS: {
++		struct ip_set_req_max_sets *req_max_sets
++			= (struct ip_set_req_max_sets *) data;
++		ip_set_id_t i;
++
++		if (*len != sizeof(struct ip_set_req_max_sets)) {
++			ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
++				      sizeof(struct ip_set_req_max_sets), *len);
++			res = -EINVAL;
++			goto done;
++		}
++
++		if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
++			req_max_sets->set.index = IP_SET_INVALID_ID;
++		} else {
++			req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++			req_max_sets->set.index = 
++				ip_set_find_byname(req_max_sets->set.name);
++			if (req_max_sets->set.index == IP_SET_INVALID_ID) {
++				res = -ENOENT;
++				goto done;
++			}
++		}
++		req_max_sets->max_sets = ip_set_max;
++		req_max_sets->sets = 0;
++		for (i = 0; i < ip_set_max; i++) {
++			if (ip_set_list[i] != NULL)
++				req_max_sets->sets++;
++		}
++		goto copy;
++	}
++	case IP_SET_OP_LIST_SIZE: 
++	case IP_SET_OP_SAVE_SIZE: {
++		struct ip_set_req_setnames *req_setnames
++			= (struct ip_set_req_setnames *) data;
++		struct ip_set_name_list *name_list;
++		struct ip_set *set;
++		ip_set_id_t i;
++		int used;
++
++		if (*len < sizeof(struct ip_set_req_setnames)) {
++			ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
++				      sizeof(struct ip_set_req_setnames), *len);
++			res = -EINVAL;
++			goto done;
++		}
++
++		req_setnames->size = 0;
++		used = sizeof(struct ip_set_req_setnames);
++		for (i = 0; i < ip_set_max; i++) {
++			if (ip_set_list[i] == NULL)
++				continue;
++			name_list = (struct ip_set_name_list *) 
++				(data + used);
++			used += sizeof(struct ip_set_name_list);
++			if (used > copylen) {
++				res = -EAGAIN;
++				goto done;
++			}
++			set = ip_set_list[i];
++			/* Fill in index, name, etc. */
++			name_list->index = i;
++			name_list->id = set->id;
++			strncpy(name_list->name,
++				set->name,
++				IP_SET_MAXNAMELEN - 1);
++			strncpy(name_list->typename,
++				set->type->typename,
++				IP_SET_MAXNAMELEN - 1);
++			DP("filled %s of type %s, index %u\n",
++			   name_list->name, name_list->typename,
++			   name_list->index);
++			if (!(req_setnames->index == IP_SET_INVALID_ID
++			      || req_setnames->index == i))
++			      continue;
++			/* Update size */
++			switch (*op) {
++			case IP_SET_OP_LIST_SIZE: {
++				req_setnames->size += sizeof(struct ip_set_list)
++					+ set->type->header_size
++					+ set->type->list_members_size(set);
++				/* Sets are identified by id in the hash */
++				FOREACH_HASH_DO(__set_hash_bindings_size_list, 
++						set->id, &req_setnames->size);
++				break;
++			}
++			case IP_SET_OP_SAVE_SIZE: {
++				req_setnames->size += sizeof(struct ip_set_save)
++					+ set->type->header_size
++					+ set->type->list_members_size(set);
++				FOREACH_HASH_DO(__set_hash_bindings_size_save,
++						set->id, &req_setnames->size);
++				break;
++			}
++			default:
++				break;
++			}
++		}
++		if (copylen != used) {
++			res = -EAGAIN;
++			goto done;
++		}
++		goto copy;
++	}
++	case IP_SET_OP_LIST: {
++		struct ip_set_req_list *req_list
++			= (struct ip_set_req_list *) data;
++		ip_set_id_t i;
++		int used;
++
++		if (*len < sizeof(struct ip_set_req_list)) {
++			ip_set_printk("short LIST (want >=%zu, got %d)",
++				      sizeof(struct ip_set_req_list), *len);
++			res = -EINVAL;
++			goto done;
++		}
++		index = req_list->index;
++		if (index != IP_SET_INVALID_ID
++		    && ip_set_find_byindex(index) != index) {
++		    	res = -ENOENT;
++		    	goto done;
++		}
++		used = 0;
++		if (index == IP_SET_INVALID_ID) {
++			/* List all sets */
++			for (i = 0; i < ip_set_max && res == 0; i++) {
++				if (ip_set_list[i] != NULL)
++					res = ip_set_list_set(i, data, &used, *len);
++			}
++		} else {
++			/* List an individual set */
++			res = ip_set_list_set(index, data, &used, *len);
++		}
++		if (res != 0)
++			goto done;
++		else if (copylen != used) {
++			res = -EAGAIN;
++			goto done;
++		}
++		goto copy;
++	}
++	case IP_SET_OP_SAVE: {
++		struct ip_set_req_list *req_save
++			= (struct ip_set_req_list *) data;
++		ip_set_id_t i;
++		int used;
++
++		if (*len < sizeof(struct ip_set_req_list)) {
++			ip_set_printk("short SAVE (want >=%zu, got %d)",
++				      sizeof(struct ip_set_req_list), *len);
++			res = -EINVAL;
++			goto done;
++		}
++		index = req_save->index;
++		if (index != IP_SET_INVALID_ID
++		    && ip_set_find_byindex(index) != index) {
++		    	res = -ENOENT;
++		    	goto done;
++		}
++		used = 0;
++		if (index == IP_SET_INVALID_ID) {
++			/* Save all sets */
++			for (i = 0; i < ip_set_max && res == 0; i++) {
++				if (ip_set_list[i] != NULL)
++					res = ip_set_save_set(i, data, &used, *len);
++			}
++		} else {
++			/* Save an individual set */
++			res = ip_set_save_set(index, data, &used, *len);
++		}
++		if (res == 0)
++			res = ip_set_save_bindings(index, data, &used, *len);
++			
++		if (res != 0)
++			goto done;
++		else if (copylen != used) {
++			res = -EAGAIN;
++			goto done;
++		}
++		goto copy;
++	}
++	case IP_SET_OP_RESTORE: {
++		struct ip_set_req_setnames *req_restore
++			= (struct ip_set_req_setnames *) data;
++		int line;
++
++		if (*len < sizeof(struct ip_set_req_setnames)
++		    || *len != req_restore->size) {
++			ip_set_printk("invalid RESTORE (want =%zu, got %d)",
++				      req_restore->size, *len);
++			res = -EINVAL;
++			goto done;
++		}
++		line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
++				      req_restore->size - sizeof(struct ip_set_req_setnames));
++		DP("ip_set_restore: %u", line);
++		if (line != 0) {
++			res = -EAGAIN;
++			req_restore->size = line;
++			copylen = sizeof(struct ip_set_req_setnames);
++			goto copy;
++		}
++		goto done;
++	}
++	default:
++		res = -EBADMSG;
++		goto done;
++	}	/* end of switch(op) */
++
++    copy:
++   	DP("set %s, copylen %u", index != IP_SET_INVALID_ID
++   	             		 && ip_set_list[index]
++   	             ? ip_set_list[index]->name
++   	             : ":all:", copylen);
++	res = copy_to_user(user, data, copylen);
++    	
++    done:
++	up(&ip_set_app_mutex);
++	vfree(data);
++	if (res > 0)
++		res = 0;
++	DP("final result %d", res);
++	return res;
++}
++
++static struct nf_sockopt_ops so_set = {
++	.pf 		= PF_INET,
++	.set_optmin 	= SO_IP_SET,
++	.set_optmax 	= SO_IP_SET + 1,
++	.set 		= &ip_set_sockfn_set,
++	.get_optmin 	= SO_IP_SET,
++	.get_optmax	= SO_IP_SET + 1,
++	.get		= &ip_set_sockfn_get,
++	.use		= 0
++};
++
++static int max_sets, hash_size;
++module_param(max_sets, int, 0600);
++MODULE_PARM_DESC(max_sets, "maximal number of sets");
++module_param(hash_size, int, 0600);
++MODULE_PARM_DESC(hash_size, "hash size for bindings");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("module implementing core IP set support");
++
++static int __init init(void)
++{
++	int res;
++	ip_set_id_t i;
++
++	get_random_bytes(&ip_set_hash_random, 4);
++	if (max_sets)
++		ip_set_max = max_sets;
++	ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
++	if (!ip_set_list) {
++		printk(KERN_ERR "Unable to create ip_set_list\n");
++		return -ENOMEM;
++	}
++	memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
++	if (hash_size)
++		ip_set_bindings_hash_size = hash_size;
++	ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
++	if (!ip_set_hash) {
++		printk(KERN_ERR "Unable to create ip_set_hash\n");
++		vfree(ip_set_list);
++		return -ENOMEM;
++	}
++	for (i = 0; i < ip_set_bindings_hash_size; i++)
++		INIT_LIST_HEAD(&ip_set_hash[i]);
++
++	INIT_LIST_HEAD(&set_type_list);
++
++	res = nf_register_sockopt(&so_set);
++	if (res != 0) {
++		ip_set_printk("SO_SET registry failed: %d", res);
++		vfree(ip_set_list);
++		vfree(ip_set_hash);
++		return res;
++	}
++	return 0;
++}
++
++static void __exit fini(void)
++{
++	/* There can't be any existing set or binding */
++	nf_unregister_sockopt(&so_set);
++	vfree(ip_set_list);
++	vfree(ip_set_hash);
++	DP("these are the famous last words");
++}
++
++EXPORT_SYMBOL(ip_set_register_set_type);
++EXPORT_SYMBOL(ip_set_unregister_set_type);
++
++EXPORT_SYMBOL(ip_set_get_byname);
++EXPORT_SYMBOL(ip_set_get_byindex);
++EXPORT_SYMBOL(ip_set_put);
++
++EXPORT_SYMBOL(ip_set_addip_kernel);
++EXPORT_SYMBOL(ip_set_delip_kernel);
++EXPORT_SYMBOL(ip_set_testip_kernel);
++
++module_init(init);
++module_exit(fini);
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_iphash.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iphash.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set_iphash.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iphash.c	2007-06-08 16:29:31.829808250 -0500
+@@ -0,0 +1,413 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module implementing an ip hash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_iphash.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++
++static int limit = MAX_RANGE;
++
++static inline __u32
++jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
++{
++	return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
++static inline __u32
++hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++	__u32 id;
++	u_int16_t i;
++	ip_set_ip_t *elem;
++
++	*hash_ip = ip & map->netmask;
++	DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
++	   set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
++	
++	for (i = 0; i < map->probes; i++) {
++		id = jhash_ip(map, i, *hash_ip) % map->hashsize;
++		DP("hash key: %u", id);
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++		if (*elem == *hash_ip)
++			return id;
++		/* No shortcut at testing - there can be deleted
++		 * entries. */
++	}
++	return UINT_MAX;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++       ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_iphash *req = 
++	    (struct ip_set_req_iphash *) data;
++
++	if (size != sizeof(struct ip_set_req_iphash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_iphash),
++			      size);
++		return -EINVAL;
++	}
++	return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set, 
++	      const struct sk_buff *skb,
++	      ip_set_ip_t *hash_ip,
++	      const u_int32_t *flags,
++	      unsigned char index)
++{
++	return __testip(set,
++			ntohl(flags[index] & IPSET_SRC 
++				? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++			hash_ip);
++}
++
++static inline int
++__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	__u32 probe;
++	u_int16_t i;
++	ip_set_ip_t *elem;
++	
++	if (!ip || map->elements > limit)
++		return -ERANGE;
++
++	*hash_ip = ip & map->netmask;
++	
++	for (i = 0; i < map->probes; i++) {
++		probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
++		if (*elem == *hash_ip)
++			return -EEXIST;
++		if (!*elem) {
++			*elem = *hash_ip;
++			map->elements++;
++			return 0;
++		}
++	}
++	/* Trigger rehashing */
++	return -EAGAIN;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++        ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_iphash *req = 
++	    (struct ip_set_req_iphash *) data;
++
++	if (size != sizeof(struct ip_set_req_iphash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_iphash),
++			      size);
++		return -EINVAL;
++	}
++	return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	return __addip((struct ip_set_iphash *) set->data,
++		       ntohl(flags[index] & IPSET_SRC 
++		       		? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       hash_ip);
++}
++
++static int retry(struct ip_set *set)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++	ip_set_ip_t hash_ip, *elem;
++	void *members;
++	u_int32_t i, hashsize = map->hashsize;
++	int res;
++	struct ip_set_iphash *tmp;
++	
++	if (map->resize == 0)
++		return -ERANGE;
++
++    again:
++    	res = 0;
++    	
++	/* Calculate new hash size */
++	hashsize += (hashsize * map->resize)/100;
++	if (hashsize == map->hashsize)
++		hashsize++;
++	
++	ip_set_printk("rehashing of set %s triggered: "
++		      "hashsize grows from %u to %u",
++		      set->name, map->hashsize, hashsize);
++
++	tmp = kmalloc(sizeof(struct ip_set_iphash) 
++		      + map->probes * sizeof(uint32_t), GFP_ATOMIC);
++	if (!tmp) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_iphash)
++		   + map->probes * sizeof(uint32_t));
++		return -ENOMEM;
++	}
++	tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
++	if (!tmp->members) {
++		DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
++		kfree(tmp);
++		return -ENOMEM;
++	}
++	tmp->hashsize = hashsize;
++	tmp->elements = 0;
++	tmp->probes = map->probes;
++	tmp->resize = map->resize;
++	tmp->netmask = map->netmask;
++	memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++	
++	write_lock_bh(&set->lock);
++	map = (struct ip_set_iphash *) set->data; /* Play safe */
++	for (i = 0; i < map->hashsize && res == 0; i++) {
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);	
++		if (*elem)
++			res = __addip(tmp, *elem, &hash_ip);
++	}
++	if (res) {
++		/* Failure, try again */
++		write_unlock_bh(&set->lock);
++		harray_free(tmp->members);
++		kfree(tmp);
++		goto again;
++	}
++	
++	/* Success at resizing! */
++	members = map->members;
++
++	map->hashsize = tmp->hashsize;
++	map->members = tmp->members;
++	write_unlock_bh(&set->lock);
++
++	harray_free(members);
++	kfree(tmp);
++
++	return 0;
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++	ip_set_ip_t id, *elem;
++
++	if (!ip)
++		return -ERANGE;
++
++	id = hash_id(set, ip, hash_ip);
++	if (id == UINT_MAX)
++		return -EEXIST;
++		
++	elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++	*elem = 0;
++	map->elements--;
++
++	return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++        ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_iphash *req =
++	    (struct ip_set_req_iphash *) data;
++
++	if (size != sizeof(struct ip_set_req_iphash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_iphash),
++			      size);
++		return -EINVAL;
++	}
++	return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	return __delip(set,
++		       ntohl(flags[index] & IPSET_SRC 
++				? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       hash_ip);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++	struct ip_set_req_iphash_create *req =
++	    (struct ip_set_req_iphash_create *) data;
++	struct ip_set_iphash *map;
++	uint16_t i;
++
++	if (size != sizeof(struct ip_set_req_iphash_create)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			       sizeof(struct ip_set_req_iphash_create),
++			       size);
++		return -EINVAL;
++	}
++
++	if (req->hashsize < 1) {
++		ip_set_printk("hashsize too small");
++		return -ENOEXEC;
++	}
++
++	if (req->probes < 1) {
++		ip_set_printk("probes too small");
++		return -ENOEXEC;
++	}
++
++	map = kmalloc(sizeof(struct ip_set_iphash) 
++		      + req->probes * sizeof(uint32_t), GFP_KERNEL);
++	if (!map) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_iphash)
++		   + req->probes * sizeof(uint32_t));
++		return -ENOMEM;
++	}
++	for (i = 0; i < req->probes; i++)
++		get_random_bytes(((uint32_t *) map->initval)+i, 4);
++	map->elements = 0;
++	map->hashsize = req->hashsize;
++	map->probes = req->probes;
++	map->resize = req->resize;
++	map->netmask = req->netmask;
++	map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++	if (!map->members) {
++		DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
++		kfree(map);
++		return -ENOMEM;
++	}
++
++	set->data = map;
++	return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++
++	harray_free(map->members);
++	kfree(map);
++
++	set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++	harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
++	map->elements = 0;
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++	struct ip_set_req_iphash_create *header =
++	    (struct ip_set_req_iphash_create *) data;
++
++	header->hashsize = map->hashsize;
++	header->probes = map->probes;
++	header->resize = map->resize;
++	header->netmask = map->netmask;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++
++	return (map->hashsize * sizeof(ip_set_ip_t));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++	struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++	ip_set_ip_t i, *elem;
++
++	for (i = 0; i < map->hashsize; i++) {
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);	
++		((ip_set_ip_t *)data)[i] = *elem;
++	}
++}
++
++static struct ip_set_type ip_set_iphash = {
++	.typename		= SETTYPE_NAME,
++	.features		= IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++	.protocol_version	= IP_SET_PROTOCOL_VERSION,
++	.create			= &create,
++	.destroy		= &destroy,
++	.flush			= &flush,
++	.reqsize		= sizeof(struct ip_set_req_iphash),
++	.addip			= &addip,
++	.addip_kernel		= &addip_kernel,
++	.retry			= &retry,
++	.delip			= &delip,
++	.delip_kernel		= &delip_kernel,
++	.testip			= &testip,
++	.testip_kernel		= &testip_kernel,
++	.header_size		= sizeof(struct ip_set_req_iphash_create),
++	.list_header		= &list_header,
++	.list_members_size	= &list_members_size,
++	.list_members		= &list_members,
++	.me			= THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iphash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init init(void)
++{
++	return ip_set_register_set_type(&ip_set_iphash);
++}
++
++static void __exit fini(void)
++{
++	/* FIXME: possible race with ip_set_create() */
++	ip_set_unregister_set_type(&ip_set_iphash);
++}
++
++module_init(init);
++module_exit(fini);
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipmap.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipmap.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipmap.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipmap.c	2007-06-08 16:29:31.833808500 -0500
+@@ -0,0 +1,327 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ *                         Patrick Schaaf <bof@bof.de>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module implementing an IP set type: the single bitmap type */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipmap.h>
++
++static inline ip_set_ip_t
++ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
++{
++	return (ip - map->first_ip)/map->hosts;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++	
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++
++	*hash_ip = ip & map->netmask;
++	DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++	   set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
++	return !!test_bit(ip_to_id(map, *hash_ip), map->members);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++       ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_ipmap *req = 
++	    (struct ip_set_req_ipmap *) data;
++
++	if (size != sizeof(struct ip_set_req_ipmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_ipmap),
++			      size);
++		return -EINVAL;
++	}
++	return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set, 
++	      const struct sk_buff *skb,
++	      ip_set_ip_t *hash_ip,
++	      const u_int32_t *flags,
++	      unsigned char index)
++{
++	int res;
++	
++	DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++	   flags[index] & IPSET_SRC ? "SRC" : "DST",
++	   NIPQUAD(skb->nh.iph->saddr),
++	   NIPQUAD(skb->nh.iph->daddr));
++
++	res =  __testip(set,
++			ntohl(flags[index] & IPSET_SRC 
++				? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++			hash_ip);
++	return (res < 0 ? 0 : res);
++}
++
++static inline int
++__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++
++	*hash_ip = ip & map->netmask;
++	DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++	if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
++		return -EEXIST;
++
++	return 0;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++      ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_ipmap *req = 
++	    (struct ip_set_req_ipmap *) data;
++
++	if (size != sizeof(struct ip_set_req_ipmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_ipmap),
++			      size);
++		return -EINVAL;
++	}
++	DP("%u.%u.%u.%u", HIPQUAD(req->ip));
++	return __addip(set, req->ip, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	return __addip(set,
++		       ntohl(flags[index] & IPSET_SRC 
++		       		? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       hash_ip);
++}
++
++static inline int 
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++
++	*hash_ip = ip & map->netmask;
++	DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++	if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
++		return -EEXIST;
++	
++	return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++      ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_ipmap *req =
++	    (struct ip_set_req_ipmap *) data;
++
++	if (size != sizeof(struct ip_set_req_ipmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_ipmap),
++			      size);
++		return -EINVAL;
++	}
++	return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	return __delip(set,
++		       ntohl(flags[index] & IPSET_SRC 
++				? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       hash_ip);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++	int newbytes;
++	struct ip_set_req_ipmap_create *req =
++	    (struct ip_set_req_ipmap_create *) data;
++	struct ip_set_ipmap *map;
++
++	if (size != sizeof(struct ip_set_req_ipmap_create)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_ipmap_create),
++			      size);
++		return -EINVAL;
++	}
++
++	DP("from %u.%u.%u.%u to %u.%u.%u.%u",
++	   HIPQUAD(req->from), HIPQUAD(req->to));
++
++	if (req->from > req->to) {
++		DP("bad ip range");
++		return -ENOEXEC;
++	}
++
++	map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
++	if (!map) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_ipmap));
++		return -ENOMEM;
++	}
++	map->first_ip = req->from;
++	map->last_ip = req->to;
++	map->netmask = req->netmask;
++
++	if (req->netmask == 0xFFFFFFFF) {
++		map->hosts = 1;
++		map->sizeid = map->last_ip - map->first_ip + 1;
++	} else {
++		unsigned int mask_bits, netmask_bits;
++		ip_set_ip_t mask;
++		
++		map->first_ip &= map->netmask;	/* Should we better bark? */
++		
++		mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
++		netmask_bits = mask_to_bits(map->netmask);
++		
++		if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
++		    || netmask_bits <= mask_bits)
++			return -ENOEXEC;
++
++		DP("mask_bits %u, netmask_bits %u",
++		   mask_bits, netmask_bits);
++		map->hosts = 2 << (32 - netmask_bits - 1);
++		map->sizeid = 2 << (netmask_bits - mask_bits - 1);
++	}
++	if (map->sizeid > MAX_RANGE + 1) {
++		ip_set_printk("range too big (max %d addresses)",
++			       MAX_RANGE+1);
++		kfree(map);
++		return -ENOEXEC;
++	}
++	DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
++	newbytes = bitmap_bytes(0, map->sizeid - 1);
++	map->members = kmalloc(newbytes, GFP_KERNEL);
++	if (!map->members) {
++		DP("out of memory for %d bytes", newbytes);
++		kfree(map);
++		return -ENOMEM;
++	}
++	memset(map->members, 0, newbytes);
++	
++	set->data = map;
++	return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++	
++	kfree(map->members);
++	kfree(map);
++	
++	set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++	memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++	struct ip_set_req_ipmap_create *header =
++	    (struct ip_set_req_ipmap_create *) data;
++
++	header->from = map->first_ip;
++	header->to = map->last_ip;
++	header->netmask = map->netmask;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++	return bitmap_bytes(0, map->sizeid - 1);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++	struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++	int bytes = bitmap_bytes(0, map->sizeid - 1);
++
++	memcpy(data, map->members, bytes);
++}
++
++static struct ip_set_type ip_set_ipmap = {
++	.typename		= SETTYPE_NAME,
++	.features		= IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++	.protocol_version	= IP_SET_PROTOCOL_VERSION,
++	.create			= &create,
++	.destroy		= &destroy,
++	.flush			= &flush,
++	.reqsize		= sizeof(struct ip_set_req_ipmap),
++	.addip			= &addip,
++	.addip_kernel		= &addip_kernel,
++	.delip			= &delip,
++	.delip_kernel		= &delip_kernel,
++	.testip			= &testip,
++	.testip_kernel		= &testip_kernel,
++	.header_size		= sizeof(struct ip_set_req_ipmap_create),
++	.list_header		= &list_header,
++	.list_members_size	= &list_members_size,
++	.list_members		= &list_members,
++	.me			= THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipmap type of IP sets");
++
++static int __init init(void)
++{
++	return ip_set_register_set_type(&ip_set_ipmap);
++}
++
++static void __exit fini(void)
++{
++	/* FIXME: possible race with ip_set_create() */
++	ip_set_unregister_set_type(&ip_set_ipmap);
++}
++
++module_init(init);
++module_exit(fini);
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipporthash.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipporthash.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipporthash.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipporthash.c	2007-06-08 16:29:31.833808500 -0500
+@@ -0,0 +1,535 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module implementing an ip+port hash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++
++static int limit = MAX_RANGE;
++
++/* We must handle non-linear skbs */
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++	struct iphdr *iph = skb->nh.iph;
++	u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++
++	switch (iph->protocol) {
++	case IPPROTO_TCP: {
++		struct tcphdr tcph;
++		
++		/* See comments at tcp_match in ip_tables.c */
++		if (offset)
++			return INVALID_PORT;
++
++		if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
++			/* No choice either */
++			return INVALID_PORT;
++	     	
++	     	return ntohs(flags & IPSET_SRC ?
++			     tcph.source : tcph.dest);
++	    }
++	case IPPROTO_UDP: {
++		struct udphdr udph;
++
++		if (offset)
++			return INVALID_PORT;
++
++		if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
++			/* No choice either */
++			return INVALID_PORT;
++	     	
++	     	return ntohs(flags & IPSET_SRC ?
++			     udph.source : udph.dest);
++	    }
++	default:
++		return INVALID_PORT;
++	}
++}
++
++static inline __u32
++jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
++{
++	return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
++#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
++
++static inline __u32
++hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++	ip_set_ip_t *hash_ip)
++{
++	struct ip_set_ipporthash *map = 
++		(struct ip_set_ipporthash *) set->data;
++	__u32 id;
++	u_int16_t i;
++	ip_set_ip_t *elem;
++
++	*hash_ip = HASH_IP(map, ip, port);
++	DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++	   set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++	
++	for (i = 0; i < map->probes; i++) {
++		id = jhash_ip(map, i, *hash_ip) % map->hashsize;
++		DP("hash key: %u", id);
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++		if (*elem == *hash_ip)
++			return id;
++		/* No shortcut at testing - there can be deleted
++		 * entries. */
++	}
++	return UINT_MAX;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++	 ip_set_ip_t *hash_ip)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++	
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++
++	return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++       ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_ipporthash *req = 
++	    (struct ip_set_req_ipporthash *) data;
++
++	if (size != sizeof(struct ip_set_req_ipporthash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_ipporthash),
++			      size);
++		return -EINVAL;
++	}
++	return __testip(set, req->ip, req->port, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set, 
++	      const struct sk_buff *skb,
++	      ip_set_ip_t *hash_ip,
++	      const u_int32_t *flags,
++	      unsigned char index)
++{
++	ip_set_ip_t port;
++
++	if (flags[index+1] == 0)
++		return -EINVAL;
++		
++	port = get_port(skb, flags[index+1]);
++
++	DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++	   flags[index] & IPSET_SRC ? "SRC" : "DST",
++	   NIPQUAD(skb->nh.iph->saddr),
++	   NIPQUAD(skb->nh.iph->daddr));
++	DP("flag %s port %u",
++	   flags[index+1] & IPSET_SRC ? "SRC" : "DST", 
++	   port);	
++	if (port == INVALID_PORT)
++		return 0;	
++
++	return __testip(set,
++			ntohl(flags[index] & IPSET_SRC 
++					? skb->nh.iph->saddr 
++					: skb->nh.iph->daddr),
++			port,
++			hash_ip);
++}
++
++static inline int
++__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
++{
++	__u32 probe;
++	u_int16_t i;
++	ip_set_ip_t *elem;
++
++	for (i = 0; i < map->probes; i++) {
++		probe = jhash_ip(map, i, hash_ip) % map->hashsize;
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
++		if (*elem == hash_ip)
++			return -EEXIST;
++		if (!*elem) {
++			*elem = hash_ip;
++			map->elements++;
++			return 0;
++		}
++	}
++	/* Trigger rehashing */
++	return -EAGAIN;
++}
++
++static inline int
++__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
++	ip_set_ip_t *hash_ip)
++{
++	if (map->elements > limit)
++		return -ERANGE;
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++
++	*hash_ip = HASH_IP(map, ip, port);
++	
++	return __add_haship(map, *hash_ip);
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++        ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_ipporthash *req = 
++	    (struct ip_set_req_ipporthash *) data;
++
++	if (size != sizeof(struct ip_set_req_ipporthash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_ipporthash),
++			      size);
++		return -EINVAL;
++	}
++	return __addip((struct ip_set_ipporthash *) set->data, 
++			req->ip, req->port, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	ip_set_ip_t port;
++
++	if (flags[index+1] == 0)
++		return -EINVAL;
++		
++	port = get_port(skb, flags[index+1]);
++
++	DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++	   flags[index] & IPSET_SRC ? "SRC" : "DST",
++	   NIPQUAD(skb->nh.iph->saddr),
++	   NIPQUAD(skb->nh.iph->daddr));
++	DP("flag %s port %u", 
++	   flags[index+1] & IPSET_SRC ? "SRC" : "DST", 
++	   port);	
++	if (port == INVALID_PORT)
++		return -EINVAL;	
++
++	return __addip((struct ip_set_ipporthash *) set->data,
++		       ntohl(flags[index] & IPSET_SRC 
++		       		? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       port,
++		       hash_ip);
++}
++
++static int retry(struct ip_set *set)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++	ip_set_ip_t *elem;
++	void *members;
++	u_int32_t i, hashsize = map->hashsize;
++	int res;
++	struct ip_set_ipporthash *tmp;
++	
++	if (map->resize == 0)
++		return -ERANGE;
++
++    again:
++    	res = 0;
++    	
++	/* Calculate new hash size */
++	hashsize += (hashsize * map->resize)/100;
++	if (hashsize == map->hashsize)
++		hashsize++;
++	
++	ip_set_printk("rehashing of set %s triggered: "
++		      "hashsize grows from %u to %u",
++		      set->name, map->hashsize, hashsize);
++
++	tmp = kmalloc(sizeof(struct ip_set_ipporthash) 
++		      + map->probes * sizeof(uint32_t), GFP_ATOMIC);
++	if (!tmp) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_ipporthash)
++		   + map->probes * sizeof(uint32_t));
++		return -ENOMEM;
++	}
++	tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
++	if (!tmp->members) {
++		DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
++		kfree(tmp);
++		return -ENOMEM;
++	}
++	tmp->hashsize = hashsize;
++	tmp->elements = 0;
++	tmp->probes = map->probes;
++	tmp->resize = map->resize;
++	tmp->first_ip = map->first_ip;
++	tmp->last_ip = map->last_ip;
++	memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++	
++	write_lock_bh(&set->lock);
++	map = (struct ip_set_ipporthash *) set->data; /* Play safe */
++	for (i = 0; i < map->hashsize && res == 0; i++) {
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);	
++		if (*elem)
++			res = __add_haship(tmp, *elem);
++	}
++	if (res) {
++		/* Failure, try again */
++		write_unlock_bh(&set->lock);
++		harray_free(tmp->members);
++		kfree(tmp);
++		goto again;
++	}
++	
++	/* Success at resizing! */
++	members = map->members;
++
++	map->hashsize = tmp->hashsize;
++	map->members = tmp->members;
++	write_unlock_bh(&set->lock);
++
++	harray_free(members);
++	kfree(tmp);
++
++	return 0;
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++	ip_set_ip_t *hash_ip)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++	ip_set_ip_t id;
++	ip_set_ip_t *elem;
++
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++
++	id = hash_id(set, ip, port, hash_ip);
++
++	if (id == UINT_MAX)
++		return -EEXIST;
++		
++	elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++	*elem = 0;
++	map->elements--;
++
++	return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++        ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_ipporthash *req =
++	    (struct ip_set_req_ipporthash *) data;
++
++	if (size != sizeof(struct ip_set_req_ipporthash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_ipporthash),
++			      size);
++		return -EINVAL;
++	}
++	return __delip(set, req->ip, req->port, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	ip_set_ip_t port;
++
++	if (flags[index+1] == 0)
++		return -EINVAL;
++		
++	port = get_port(skb, flags[index+1]);
++
++	DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++	   flags[index] & IPSET_SRC ? "SRC" : "DST",
++	   NIPQUAD(skb->nh.iph->saddr),
++	   NIPQUAD(skb->nh.iph->daddr));
++	DP("flag %s port %u",
++	   flags[index+1] & IPSET_SRC ? "SRC" : "DST", 
++	   port);	
++	if (port == INVALID_PORT)
++		return -EINVAL;	
++
++	return __delip(set,
++		       ntohl(flags[index] & IPSET_SRC 
++		       		? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       port,
++		       hash_ip);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++	struct ip_set_req_ipporthash_create *req =
++	    (struct ip_set_req_ipporthash_create *) data;
++	struct ip_set_ipporthash *map;
++	uint16_t i;
++
++	if (size != sizeof(struct ip_set_req_ipporthash_create)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			       sizeof(struct ip_set_req_ipporthash_create),
++			       size);
++		return -EINVAL;
++	}
++
++	if (req->hashsize < 1) {
++		ip_set_printk("hashsize too small");
++		return -ENOEXEC;
++	}
++
++	if (req->probes < 1) {
++		ip_set_printk("probes too small");
++		return -ENOEXEC;
++	}
++
++	map = kmalloc(sizeof(struct ip_set_ipporthash) 
++		      + req->probes * sizeof(uint32_t), GFP_KERNEL);
++	if (!map) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_ipporthash)
++		   + req->probes * sizeof(uint32_t));
++		return -ENOMEM;
++	}
++	for (i = 0; i < req->probes; i++)
++		get_random_bytes(((uint32_t *) map->initval)+i, 4);
++	map->elements = 0;
++	map->hashsize = req->hashsize;
++	map->probes = req->probes;
++	map->resize = req->resize;
++	map->first_ip = req->from;
++	map->last_ip = req->to;
++	map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++	if (!map->members) {
++		DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
++		kfree(map);
++		return -ENOMEM;
++	}
++
++	set->data = map;
++	return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++
++	harray_free(map->members);
++	kfree(map);
++
++	set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++	harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
++	map->elements = 0;
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++	struct ip_set_req_ipporthash_create *header =
++	    (struct ip_set_req_ipporthash_create *) data;
++
++	header->hashsize = map->hashsize;
++	header->probes = map->probes;
++	header->resize = map->resize;
++	header->from = map->first_ip;
++	header->to = map->last_ip;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++
++	return (map->hashsize * sizeof(ip_set_ip_t));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++	struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++	ip_set_ip_t i, *elem;
++
++	for (i = 0; i < map->hashsize; i++) {
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);	
++		((ip_set_ip_t *)data)[i] = *elem;
++	}
++}
++
++static struct ip_set_type ip_set_ipporthash = {
++	.typename		= SETTYPE_NAME,
++	.features		= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
++	.protocol_version	= IP_SET_PROTOCOL_VERSION,
++	.create			= &create,
++	.destroy		= &destroy,
++	.flush			= &flush,
++	.reqsize		= sizeof(struct ip_set_req_ipporthash),
++	.addip			= &addip,
++	.addip_kernel		= &addip_kernel,
++	.retry			= &retry,
++	.delip			= &delip,
++	.delip_kernel		= &delip_kernel,
++	.testip			= &testip,
++	.testip_kernel		= &testip_kernel,
++	.header_size		= sizeof(struct ip_set_req_ipporthash_create),
++	.list_header		= &list_header,
++	.list_members_size	= &list_members_size,
++	.list_members		= &list_members,
++	.me			= THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipporthash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init init(void)
++{
++	return ip_set_register_set_type(&ip_set_ipporthash);
++}
++
++static void __exit fini(void)
++{
++	/* FIXME: possible race with ip_set_create() */
++	ip_set_unregister_set_type(&ip_set_ipporthash);
++}
++
++module_init(init);
++module_exit(fini);
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_iptree.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iptree.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set_iptree.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iptree.c	2007-06-08 16:29:31.833808500 -0500
+@@ -0,0 +1,571 @@
++/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module implementing an IP set type: the iptree type */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++/* Backward compatibility */
++#ifndef __nocast
++#define __nocast
++#endif
++
++#include <linux/netfilter_ipv4/ip_set_iptree.h>
++
++static int limit = MAX_RANGE;
++
++/* Garbage collection interval in seconds: */
++#define IPTREE_GC_TIME		5*60
++/* Sleep so many milliseconds before trying again
++ * to delete the gc timer when destroying/flushing a set */
++#define IPTREE_DESTROY_SLEEP	100
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++static struct kmem_cache *branch_cachep;
++static struct kmem_cache *leaf_cachep;
++#else
++static kmem_cache_t *branch_cachep;
++static kmem_cache_t *leaf_cachep;
++#endif
++
++#define ABCD(a,b,c,d,addrp) do {		\
++	a = ((unsigned char *)addrp)[3];	\
++	b = ((unsigned char *)addrp)[2];	\
++	c = ((unsigned char *)addrp)[1];	\
++	d = ((unsigned char *)addrp)[0];	\
++} while (0)
++
++#define TESTIP_WALK(map, elem, branch) do {	\
++	if ((map)->tree[elem]) {		\
++		branch = (map)->tree[elem];	\
++	} else 					\
++		return 0;			\
++} while (0)
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_iptreeb *btree;
++	struct ip_set_iptreec *ctree;
++	struct ip_set_iptreed *dtree;
++	unsigned char a,b,c,d;
++
++	if (!ip)
++		return -ERANGE;
++	
++	*hash_ip = ip;
++	ABCD(a, b, c, d, hash_ip);
++	DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
++	TESTIP_WALK(map, a, btree);
++	TESTIP_WALK(btree, b, ctree);
++	TESTIP_WALK(ctree, c, dtree);
++	DP("%lu %lu", dtree->expires[d], jiffies);
++	return !!(map->timeout ? (time_after(dtree->expires[d], jiffies))
++			       : dtree->expires[d]);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++       ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_iptree *req = 
++	    (struct ip_set_req_iptree *) data;
++
++	if (size != sizeof(struct ip_set_req_iptree)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_iptree),
++			      size);
++		return -EINVAL;
++	}
++	return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set, 
++	      const struct sk_buff *skb,
++	      ip_set_ip_t *hash_ip,
++	      const u_int32_t *flags,
++	      unsigned char index)
++{
++	int res;
++	
++	DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++	   flags[index] & IPSET_SRC ? "SRC" : "DST",
++	   NIPQUAD(skb->nh.iph->saddr),
++	   NIPQUAD(skb->nh.iph->daddr));
++
++	res =  __testip(set,
++			ntohl(flags[index] & IPSET_SRC 
++				? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++			hash_ip);
++	return (res < 0 ? 0 : res);
++}
++
++#define ADDIP_WALK(map, elem, branch, type, cachep, flags) do {	\
++	if ((map)->tree[elem]) {				\
++		DP("found %u", elem);				\
++		branch = (map)->tree[elem];			\
++	} else {						\
++		branch = (type *)				\
++			kmem_cache_alloc(cachep, flags);	\
++		if (branch == NULL)				\
++			return -ENOMEM;				\
++		memset(branch, 0, sizeof(*branch));		\
++		(map)->tree[elem] = branch;			\
++		DP("alloc %u", elem);				\
++	}							\
++} while (0)	
++
++static inline int
++__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
++	ip_set_ip_t *hash_ip,
++	unsigned int __nocast flags)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_iptreeb *btree;
++	struct ip_set_iptreec *ctree;
++	struct ip_set_iptreed *dtree;
++	unsigned char a,b,c,d;
++	int ret = 0;
++	
++	if (!ip || map->elements > limit)
++		/* We could call the garbage collector
++		 * but it's probably overkill */
++		return -ERANGE;
++	
++	*hash_ip = ip;
++	ABCD(a, b, c, d, hash_ip);
++	DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
++	ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep, flags);
++	ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep, flags);
++	ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep, flags);
++	if (dtree->expires[d]
++	    && (!map->timeout || time_after(dtree->expires[d], jiffies)))
++	    	ret = -EEXIST;
++	dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
++	/* Lottery */
++	if (dtree->expires[d] == 0)
++		dtree->expires[d] = 1;
++	DP("%u %lu", d, dtree->expires[d]);
++	if (ret == 0)
++		map->elements++;
++	return ret;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++      ip_set_ip_t *hash_ip)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_req_iptree *req = 
++		(struct ip_set_req_iptree *) data;
++
++	if (size != sizeof(struct ip_set_req_iptree)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_iptree),
++			      size);
++		return -EINVAL;
++	}
++	DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
++	return __addip(set, req->ip,
++		       req->timeout ? req->timeout : map->timeout,
++		       hash_ip,
++		       GFP_ATOMIC);
++}
++
++static int
++addip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++	return __addip(set,
++		       ntohl(flags[index] & IPSET_SRC 
++		       		? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       map->timeout,
++		       hash_ip,
++		       GFP_ATOMIC);
++}
++
++#define DELIP_WALK(map, elem, branch) do {	\
++	if ((map)->tree[elem]) {		\
++		branch = (map)->tree[elem];	\
++	} else 					\
++		return -EEXIST;			\
++} while (0)
++
++static inline int 
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_iptreeb *btree;
++	struct ip_set_iptreec *ctree;
++	struct ip_set_iptreed *dtree;
++	unsigned char a,b,c,d;
++	
++	if (!ip)
++		return -ERANGE;
++		
++	*hash_ip = ip;
++	ABCD(a, b, c, d, hash_ip);
++	DELIP_WALK(map, a, btree);
++	DELIP_WALK(btree, b, ctree);
++	DELIP_WALK(ctree, c, dtree);
++
++	if (dtree->expires[d]) {
++		dtree->expires[d] = 0;
++		map->elements--;
++		return 0;
++	}
++	return -EEXIST;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++      ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_iptree *req =
++	    (struct ip_set_req_iptree *) data;
++
++	if (size != sizeof(struct ip_set_req_iptree)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_iptree),
++			      size);
++		return -EINVAL;
++	}
++	return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	return __delip(set,
++		       ntohl(flags[index] & IPSET_SRC 
++		       		? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       hash_ip);
++}
++
++#define LOOP_WALK_BEGIN(map, i, branch) \
++	for (i = 0; i < 256; i++) {	\
++		if (!(map)->tree[i])	\
++			continue;	\
++		branch = (map)->tree[i]
++
++#define LOOP_WALK_END }
++
++static void ip_tree_gc(unsigned long ul_set)
++{
++	struct ip_set *set = (void *) ul_set;
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_iptreeb *btree;
++	struct ip_set_iptreec *ctree;
++	struct ip_set_iptreed *dtree;
++	unsigned int a,b,c,d;
++	unsigned char i,j,k;
++
++	i = j = k = 0;
++	DP("gc: %s", set->name);
++	write_lock_bh(&set->lock);
++	LOOP_WALK_BEGIN(map, a, btree);
++	LOOP_WALK_BEGIN(btree, b, ctree);
++	LOOP_WALK_BEGIN(ctree, c, dtree);
++	for (d = 0; d < 256; d++) {
++		if (dtree->expires[d]) {
++			DP("gc: %u %u %u %u: expires %lu jiffies %lu",
++			    a, b, c, d,
++			    dtree->expires[d], jiffies);
++			if (map->timeout
++			    && time_before(dtree->expires[d], jiffies)) {
++			    	dtree->expires[d] = 0;
++			    	map->elements--;
++			} else
++				k = 1;
++		}
++	}
++	if (k == 0) {
++		DP("gc: %s: leaf %u %u %u empty",
++		    set->name, a, b, c);
++		kmem_cache_free(leaf_cachep, dtree);
++		ctree->tree[c] = NULL;
++	} else {
++		DP("gc: %s: leaf %u %u %u not empty",
++		    set->name, a, b, c);
++		j = 1;
++		k = 0;
++	}
++	LOOP_WALK_END;
++	if (j == 0) {
++		DP("gc: %s: branch %u %u empty",
++		    set->name, a, b);
++		kmem_cache_free(branch_cachep, ctree);
++		btree->tree[b] = NULL;
++	} else {
++		DP("gc: %s: branch %u %u not empty",
++		    set->name, a, b);
++		i = 1;
++		j = k = 0;
++	}
++	LOOP_WALK_END;
++	if (i == 0) {
++		DP("gc: %s: branch %u empty",
++		    set->name, a);
++		kmem_cache_free(branch_cachep, btree);
++		map->tree[a] = NULL;
++	} else {
++		DP("gc: %s: branch %u not empty",
++		    set->name, a);
++		i = j = k = 0;
++	}
++	LOOP_WALK_END;
++	write_unlock_bh(&set->lock);
++	
++	map->gc.expires = jiffies + map->gc_interval * HZ;
++	add_timer(&map->gc);
++}
++
++static inline void init_gc_timer(struct ip_set *set)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++	/* Even if there is no timeout for the entries,
++	 * we still have to call gc because delete
++	 * does not clean up empty branches */
++	map->gc_interval = IPTREE_GC_TIME;
++	init_timer(&map->gc);
++	map->gc.data = (unsigned long) set;
++	map->gc.function = ip_tree_gc;
++	map->gc.expires = jiffies + map->gc_interval * HZ;
++	add_timer(&map->gc);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++	struct ip_set_req_iptree_create *req =
++	    (struct ip_set_req_iptree_create *) data;
++	struct ip_set_iptree *map;
++
++	if (size != sizeof(struct ip_set_req_iptree_create)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_iptree_create),
++			      size);
++		return -EINVAL;
++	}
++
++	map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
++	if (!map) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_iptree));
++		return -ENOMEM;
++	}
++	memset(map, 0, sizeof(*map));
++	map->timeout = req->timeout;
++	map->elements = 0;
++	set->data = map;
++
++	init_gc_timer(set);
++
++	return 0;
++}
++
++static void __flush(struct ip_set_iptree *map)
++{
++	struct ip_set_iptreeb *btree;
++	struct ip_set_iptreec *ctree;
++	struct ip_set_iptreed *dtree;
++	unsigned int a,b,c;
++
++	LOOP_WALK_BEGIN(map, a, btree);
++	LOOP_WALK_BEGIN(btree, b, ctree);
++	LOOP_WALK_BEGIN(ctree, c, dtree);
++	kmem_cache_free(leaf_cachep, dtree);
++	LOOP_WALK_END;
++	kmem_cache_free(branch_cachep, ctree);
++	LOOP_WALK_END;
++	kmem_cache_free(branch_cachep, btree);
++	LOOP_WALK_END;
++	map->elements = 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++	/* gc might be running */
++	while (!del_timer(&map->gc))
++		msleep(IPTREE_DESTROY_SLEEP);
++	__flush(map);
++	kfree(map);
++	set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	unsigned int timeout = map->timeout;
++	
++	/* gc might be running */
++	while (!del_timer(&map->gc))
++		msleep(IPTREE_DESTROY_SLEEP);
++	__flush(map);
++	memset(map, 0, sizeof(*map));
++	map->timeout = timeout;
++
++	init_gc_timer(set);
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_req_iptree_create *header =
++	    (struct ip_set_req_iptree_create *) data;
++
++	header->timeout = map->timeout;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_iptreeb *btree;
++	struct ip_set_iptreec *ctree;
++	struct ip_set_iptreed *dtree;
++	unsigned int a,b,c,d;
++	unsigned int count = 0;
++
++	LOOP_WALK_BEGIN(map, a, btree);
++	LOOP_WALK_BEGIN(btree, b, ctree);
++	LOOP_WALK_BEGIN(ctree, c, dtree);
++	for (d = 0; d < 256; d++) {
++		if (dtree->expires[d]
++		    && (!map->timeout || time_after(dtree->expires[d], jiffies)))
++		    	count++;
++	}
++	LOOP_WALK_END;
++	LOOP_WALK_END;
++	LOOP_WALK_END;
++
++	DP("members %u", count);
++	return (count * sizeof(struct ip_set_req_iptree));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++	struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++	struct ip_set_iptreeb *btree;
++	struct ip_set_iptreec *ctree;
++	struct ip_set_iptreed *dtree;
++	unsigned int a,b,c,d;
++	size_t offset = 0;
++	struct ip_set_req_iptree *entry;
++
++	LOOP_WALK_BEGIN(map, a, btree);
++	LOOP_WALK_BEGIN(btree, b, ctree);
++	LOOP_WALK_BEGIN(ctree, c, dtree);
++	for (d = 0; d < 256; d++) {
++		if (dtree->expires[d]
++		    && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
++		    	entry = (struct ip_set_req_iptree *)(data + offset);
++		    	entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
++		    	entry->timeout = !map->timeout ? 0 
++				: (dtree->expires[d] - jiffies)/HZ;
++			offset += sizeof(struct ip_set_req_iptree);
++		}
++	}
++	LOOP_WALK_END;
++	LOOP_WALK_END;
++	LOOP_WALK_END;
++}
++
++static struct ip_set_type ip_set_iptree = {
++	.typename		= SETTYPE_NAME,
++	.features		= IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++	.protocol_version	= IP_SET_PROTOCOL_VERSION,
++	.create			= &create,
++	.destroy		= &destroy,
++	.flush			= &flush,
++	.reqsize		= sizeof(struct ip_set_req_iptree),
++	.addip			= &addip,
++	.addip_kernel		= &addip_kernel,
++	.delip			= &delip,
++	.delip_kernel		= &delip_kernel,
++	.testip			= &testip,
++	.testip_kernel		= &testip_kernel,
++	.header_size		= sizeof(struct ip_set_req_iptree_create),
++	.list_header		= &list_header,
++	.list_members_size	= &list_members_size,
++	.list_members		= &list_members,
++	.me			= THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iptree type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init init(void)
++{
++	int ret;
++	
++	branch_cachep = kmem_cache_create("ip_set_iptreeb",
++				sizeof(struct ip_set_iptreeb),
++				0, 0, NULL, NULL);
++	if (!branch_cachep) {
++		printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
++		ret = -ENOMEM;
++		goto out;
++	}
++	leaf_cachep = kmem_cache_create("ip_set_iptreed",
++				sizeof(struct ip_set_iptreed),
++				0, 0, NULL, NULL);
++	if (!leaf_cachep) {
++		printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
++		ret = -ENOMEM;
++		goto free_branch;
++	}
++	ret = ip_set_register_set_type(&ip_set_iptree);
++	if (ret == 0)
++		goto out;
++
++	kmem_cache_destroy(leaf_cachep);
++    free_branch:	
++	kmem_cache_destroy(branch_cachep);
++    out:
++	return ret;
++}
++
++static void __exit fini(void)
++{
++	/* FIXME: possible race with ip_set_create() */
++	ip_set_unregister_set_type(&ip_set_iptree);
++	kmem_cache_destroy(leaf_cachep);
++	kmem_cache_destroy(branch_cachep);
++}
++
++module_init(init);
++module_exit(fini);
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_macipmap.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_macipmap.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set_macipmap.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_macipmap.c	2007-06-08 16:29:31.833808500 -0500
+@@ -0,0 +1,353 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ *                         Patrick Schaaf <bof@bof.de>
++ *                         Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module implementing an IP set type: the macipmap type */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/if_ether.h>
++#include <linux/vmalloc.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_macipmap.h>
++
++static int
++testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
++	struct ip_set_macip *table = (struct ip_set_macip *) map->members;	
++	struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
++
++	if (size != sizeof(struct ip_set_req_macipmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_macipmap),
++			      size);
++		return -EINVAL;
++	}
++
++	if (req->ip < map->first_ip || req->ip > map->last_ip)
++		return -ERANGE;
++
++	*hash_ip = req->ip;
++	DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++	   set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));		
++	if (test_bit(IPSET_MACIP_ISSET,
++		     (void *) &table[req->ip - map->first_ip].flags)) {
++		return (memcmp(req->ethernet,
++			       &table[req->ip - map->first_ip].ethernet,
++			       ETH_ALEN) == 0);
++	} else {
++		return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
++	}
++}
++
++static int
++testip_kernel(struct ip_set *set, 
++	      const struct sk_buff *skb,
++	      ip_set_ip_t *hash_ip,
++	      const u_int32_t *flags,
++	      unsigned char index)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++	struct ip_set_macip *table =
++	    (struct ip_set_macip *) map->members;
++	ip_set_ip_t ip;
++	
++	ip = ntohl(flags[index] & IPSET_SRC
++			? skb->nh.iph->saddr
++			: skb->nh.iph->daddr);
++	DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++	   flags[index] & IPSET_SRC ? "SRC" : "DST",
++	   NIPQUAD(skb->nh.iph->saddr),
++	   NIPQUAD(skb->nh.iph->daddr));
++
++	if (ip < map->first_ip || ip > map->last_ip)
++		return 0;
++
++	*hash_ip = ip;	
++	DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++	   set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));		
++	if (test_bit(IPSET_MACIP_ISSET,
++	    (void *) &table[ip - map->first_ip].flags)) {
++		/* Is mac pointer valid?
++		 * If so, compare... */
++		return (skb->mac.raw >= skb->head
++			&& (skb->mac.raw + ETH_HLEN) <= skb->data
++			&& (memcmp(eth_hdr(skb)->h_source,
++				   &table[ip - map->first_ip].ethernet,
++				   ETH_ALEN) == 0));
++	} else {
++		return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
++	}
++}
++
++/* returns 0 on success */
++static inline int
++__addip(struct ip_set *set, 
++	ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++	struct ip_set_macip *table =
++	    (struct ip_set_macip *) map->members;
++
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++	if (test_and_set_bit(IPSET_MACIP_ISSET, 
++			     (void *) &table[ip - map->first_ip].flags))
++		return -EEXIST;
++
++	*hash_ip = ip;
++	DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++	memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
++	return 0;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++      ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_macipmap *req =
++	    (struct ip_set_req_macipmap *) data;
++
++	if (size != sizeof(struct ip_set_req_macipmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_macipmap),
++			      size);
++		return -EINVAL;
++	}
++	return __addip(set, req->ip, req->ethernet, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	ip_set_ip_t ip;
++	
++	ip = ntohl(flags[index] & IPSET_SRC
++			? skb->nh.iph->saddr
++			: skb->nh.iph->daddr);
++
++	if (!(skb->mac.raw >= skb->head
++	      && (skb->mac.raw + ETH_HLEN) <= skb->data))
++		return -EINVAL;
++
++	return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++	struct ip_set_macip *table =
++	    (struct ip_set_macip *) map->members;
++
++	if (ip < map->first_ip || ip > map->last_ip)
++		return -ERANGE;
++	if (!test_and_clear_bit(IPSET_MACIP_ISSET, 
++				(void *)&table[ip - map->first_ip].flags))
++		return -EEXIST;
++
++	*hash_ip = ip;
++	DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++	return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++     ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_macipmap *req =
++	    (struct ip_set_req_macipmap *) data;
++
++	if (size != sizeof(struct ip_set_req_macipmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_macipmap),
++			      size);
++		return -EINVAL;
++	}
++	return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	return __delip(set,
++		       ntohl(flags[index] & IPSET_SRC 
++		       		? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++		       hash_ip);
++}
++
++static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
++{
++	return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++	int newbytes;
++	struct ip_set_req_macipmap_create *req =
++	    (struct ip_set_req_macipmap_create *) data;
++	struct ip_set_macipmap *map;
++
++	if (size != sizeof(struct ip_set_req_macipmap_create)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_macipmap_create),
++			      size);
++		return -EINVAL;
++	}
++
++	DP("from %u.%u.%u.%u to %u.%u.%u.%u",
++	   HIPQUAD(req->from), HIPQUAD(req->to));
++
++	if (req->from > req->to) {
++		DP("bad ip range");
++		return -ENOEXEC;
++	}
++
++	if (req->to - req->from > MAX_RANGE) {
++		ip_set_printk("range too big (max %d addresses)",
++			       MAX_RANGE+1);
++		return -ENOEXEC;
++	}
++
++	map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
++	if (!map) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_macipmap));
++		return -ENOMEM;
++	}
++	map->flags = req->flags;
++	map->first_ip = req->from;
++	map->last_ip = req->to;
++	newbytes = members_size(map->first_ip, map->last_ip);
++	map->members = ip_set_malloc(newbytes);
++	DP("members: %u %p", newbytes, map->members);
++	if (!map->members) {
++		DP("out of memory for %d bytes", newbytes);
++		kfree(map);
++		return -ENOMEM;
++	}
++	memset(map->members, 0, newbytes);
++	
++	set->data = map;
++	return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++
++	ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
++	kfree(map);
++
++	set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++	memset(map->members, 0, members_size(map->first_ip, map->last_ip));
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++	struct ip_set_req_macipmap_create *header =
++	    (struct ip_set_req_macipmap_create *) data;
++
++	DP("list_header %x %x %u", map->first_ip, map->last_ip,
++	   map->flags);
++
++	header->from = map->first_ip;
++	header->to = map->last_ip;
++	header->flags = map->flags;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++
++	DP("%u", members_size(map->first_ip, map->last_ip));
++	return members_size(map->first_ip, map->last_ip);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++	struct ip_set_macipmap *map =
++	    (struct ip_set_macipmap *) set->data;
++
++	int bytes = members_size(map->first_ip, map->last_ip);
++
++	DP("members: %u %p", bytes, map->members);
++	memcpy(data, map->members, bytes);
++}
++
++static struct ip_set_type ip_set_macipmap = {
++	.typename		= SETTYPE_NAME,
++	.features		= IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++	.protocol_version	= IP_SET_PROTOCOL_VERSION,
++	.create			= &create,
++	.destroy		= &destroy,
++	.flush			= &flush,
++	.reqsize		= sizeof(struct ip_set_req_macipmap),
++	.addip			= &addip,
++	.addip_kernel		= &addip_kernel,
++	.delip			= &delip,
++	.delip_kernel		= &delip_kernel,
++	.testip			= &testip,
++	.testip_kernel		= &testip_kernel,
++	.header_size		= sizeof(struct ip_set_req_macipmap_create),
++	.list_header		= &list_header,
++	.list_members_size	= &list_members_size,
++	.list_members		= &list_members,
++	.me			= THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("macipmap type of IP sets");
++
++static int __init init(void)
++{
++	init_max_malloc_size();
++	return ip_set_register_set_type(&ip_set_macipmap);
++}
++
++static void __exit fini(void)
++{
++	/* FIXME: possible race with ip_set_create() */
++	ip_set_unregister_set_type(&ip_set_macipmap);
++}
++
++module_init(init);
++module_exit(fini);
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_nethash.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_nethash.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set_nethash.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_nethash.c	2007-06-08 16:29:31.833808500 -0500
+@@ -0,0 +1,481 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module implementing a cidr nethash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_nethash.h>
++#include <linux/netfilter_ipv4/ip_set_jhash.h>
++
++static int limit = MAX_RANGE;
++
++static inline __u32
++jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
++{
++	return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
++static inline __u32
++hash_id_cidr(struct ip_set_nethash *map,
++	     ip_set_ip_t ip,
++	     unsigned char cidr,
++	     ip_set_ip_t *hash_ip)
++{
++	__u32 id;
++	u_int16_t i;
++	ip_set_ip_t *elem;
++
++	*hash_ip = pack(ip, cidr);
++	
++	for (i = 0; i < map->probes; i++) {
++		id = jhash_ip(map, i, *hash_ip) % map->hashsize;
++	   	DP("hash key: %u", id);
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++	   	if (*elem == *hash_ip)
++			return id;
++	}
++	return UINT_MAX;
++}
++
++static inline __u32
++hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++	__u32 id = UINT_MAX;
++	int i;
++
++	for (i = 0; i < 30 && map->cidr[i]; i++) {
++		id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
++		if (id != UINT_MAX)
++			break;
++	}
++	return id;
++}
++
++static inline int
++__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
++	      ip_set_ip_t *hash_ip)
++{
++	struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++
++	return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++	return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++       ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_nethash *req = 
++	    (struct ip_set_req_nethash *) data;
++
++	if (size != sizeof(struct ip_set_req_nethash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_nethash),
++			      size);
++		return -EINVAL;
++	}
++	return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
++		: __testip_cidr(set, req->ip, req->cidr, hash_ip));
++}
++
++static int
++testip_kernel(struct ip_set *set, 
++	      const struct sk_buff *skb,
++	      ip_set_ip_t *hash_ip,
++	      const u_int32_t *flags,
++	      unsigned char index)
++{
++	return __testip(set,
++			ntohl(flags[index] & IPSET_SRC 
++				? skb->nh.iph->saddr 
++				: skb->nh.iph->daddr),
++			hash_ip);
++}
++
++static inline int
++__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
++{
++	__u32 probe;
++	u_int16_t i;
++	ip_set_ip_t *elem;
++	
++	for (i = 0; i < map->probes; i++) {
++		probe = jhash_ip(map, i, ip) % map->hashsize;
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
++		if (*elem == ip)
++			return -EEXIST;
++		if (!*elem) {
++			*elem = ip;
++			map->elements++;
++			return 0;
++		}
++	}
++	/* Trigger rehashing */
++	return -EAGAIN;
++}
++
++static inline int
++__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
++	ip_set_ip_t *hash_ip)
++{
++	if (!ip || map->elements > limit)
++		return -ERANGE;
++	
++	*hash_ip = pack(ip, cidr);
++	DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
++	
++	return __addip_base(map, *hash_ip);
++}
++
++static void
++update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
++{
++	unsigned char next;
++	int i;
++	
++	for (i = 0; i < 30 && map->cidr[i]; i++) {
++		if (map->cidr[i] == cidr) {
++			return;
++		} else if (map->cidr[i] < cidr) {
++			next = map->cidr[i];
++			map->cidr[i] = cidr;
++			cidr = next;
++		}
++	}
++	if (i < 30)
++		map->cidr[i] = cidr;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++        ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_nethash *req = 
++	    (struct ip_set_req_nethash *) data;
++	int ret;
++
++	if (size != sizeof(struct ip_set_req_nethash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_nethash),
++			      size);
++		return -EINVAL;
++	}
++	ret = __addip((struct ip_set_nethash *) set->data, 
++		      req->ip, req->cidr, hash_ip);
++	
++	if (ret == 0)
++		update_cidr_sizes((struct ip_set_nethash *) set->data,
++				  req->cidr);
++	
++	return ret;
++}
++
++static int
++addip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++	int ret = -ERANGE;
++	ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC 
++					? skb->nh.iph->saddr
++					: skb->nh.iph->daddr);
++	
++	if (map->cidr[0])
++		ret = __addip(map, ip, map->cidr[0], hash_ip);
++		
++	return ret;
++}
++
++static int retry(struct ip_set *set)
++{
++	struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++	ip_set_ip_t *elem;
++	void *members;
++	u_int32_t i, hashsize = map->hashsize;
++	int res;
++	struct ip_set_nethash *tmp;
++	
++	if (map->resize == 0)
++		return -ERANGE;
++
++    again:
++    	res = 0;
++    	
++	/* Calculate new parameters */
++	hashsize += (hashsize * map->resize)/100;
++	if (hashsize == map->hashsize)
++		hashsize++;
++	
++	ip_set_printk("rehashing of set %s triggered: "
++		      "hashsize grows from %u to %u",
++		      set->name, map->hashsize, hashsize);
++
++	tmp = kmalloc(sizeof(struct ip_set_nethash) 
++		      + map->probes * sizeof(uint32_t), GFP_ATOMIC);
++	if (!tmp) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_nethash)
++		   + map->probes * sizeof(uint32_t));
++		return -ENOMEM;
++	}
++	tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
++	if (!tmp->members) {
++		DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
++		kfree(tmp);
++		return -ENOMEM;
++	}
++	tmp->hashsize = hashsize;
++	tmp->elements = 0;
++	tmp->probes = map->probes;
++	tmp->resize = map->resize;
++	memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++	memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
++	
++	write_lock_bh(&set->lock);
++	map = (struct ip_set_nethash *) set->data; /* Play safe */
++	for (i = 0; i < map->hashsize && res == 0; i++) {
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);	
++		if (*elem)
++			res = __addip_base(tmp, *elem);
++	}
++	if (res) {
++		/* Failure, try again */
++		write_unlock_bh(&set->lock);
++		harray_free(tmp->members);
++		kfree(tmp);
++		goto again;
++	}
++	
++	/* Success at resizing! */
++	members = map->members;
++	
++	map->hashsize = tmp->hashsize;
++	map->members = tmp->members;
++	write_unlock_bh(&set->lock);
++
++	harray_free(members);
++	kfree(tmp);
++
++	return 0;
++}
++
++static inline int
++__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
++	ip_set_ip_t *hash_ip)
++{
++	ip_set_ip_t id, *elem;
++
++	if (!ip)
++		return -ERANGE;
++	
++	id = hash_id_cidr(map, ip, cidr, hash_ip);
++	if (id == UINT_MAX)
++		return -EEXIST;
++		
++	elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++	*elem = 0;
++	map->elements--;
++	return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++        ip_set_ip_t *hash_ip)
++{
++	struct ip_set_req_nethash *req =
++	    (struct ip_set_req_nethash *) data;
++
++	if (size != sizeof(struct ip_set_req_nethash)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_nethash),
++			      size);
++		return -EINVAL;
++	}
++	/* TODO: no garbage collection in map->cidr */		
++	return __delip((struct ip_set_nethash *) set->data, 
++		       req->ip, req->cidr, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set, 
++	     const struct sk_buff *skb,
++	     ip_set_ip_t *hash_ip,
++	     const u_int32_t *flags,
++	     unsigned char index)
++{
++	struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++	int ret = -ERANGE;
++	ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC 
++					? skb->nh.iph->saddr
++					: skb->nh.iph->daddr);
++	
++	if (map->cidr[0])
++		ret = __delip(map, ip, map->cidr[0], hash_ip);
++	
++	return ret;
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++	struct ip_set_req_nethash_create *req =
++	    (struct ip_set_req_nethash_create *) data;
++	struct ip_set_nethash *map;
++	uint16_t i;
++
++	if (size != sizeof(struct ip_set_req_nethash_create)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			       sizeof(struct ip_set_req_nethash_create),
++			       size);
++		return -EINVAL;
++	}
++
++	if (req->hashsize < 1) {
++		ip_set_printk("hashsize too small");
++		return -ENOEXEC;
++	}
++	if (req->probes < 1) {
++		ip_set_printk("probes too small");
++		return -ENOEXEC;
++	}
++
++	map = kmalloc(sizeof(struct ip_set_nethash)
++		      + req->probes * sizeof(uint32_t), GFP_KERNEL);
++	if (!map) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_nethash)
++		   + req->probes * sizeof(uint32_t));
++		return -ENOMEM;
++	}
++	for (i = 0; i < req->probes; i++)
++		get_random_bytes(((uint32_t *) map->initval)+i, 4);
++	map->elements = 0;
++	map->hashsize = req->hashsize;
++	map->probes = req->probes;
++	map->resize = req->resize;
++	memset(map->cidr, 0, 30 * sizeof(unsigned char));
++	map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++	if (!map->members) {
++		DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
++		kfree(map);
++		return -ENOMEM;
++	}
++	
++	set->data = map;
++	return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++	struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++
++	harray_free(map->members);
++	kfree(map);
++
++	set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++	struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++	harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
++	memset(map->cidr, 0, 30 * sizeof(unsigned char));
++	map->elements = 0;
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++	struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++	struct ip_set_req_nethash_create *header =
++	    (struct ip_set_req_nethash_create *) data;
++
++	header->hashsize = map->hashsize;
++	header->probes = map->probes;
++	header->resize = map->resize;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++	struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++
++	return (map->hashsize * sizeof(ip_set_ip_t));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++	struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++	ip_set_ip_t i, *elem;
++
++	for (i = 0; i < map->hashsize; i++) {
++		elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);	
++		((ip_set_ip_t *)data)[i] = *elem;
++	}
++}
++
++static struct ip_set_type ip_set_nethash = {
++	.typename		= SETTYPE_NAME,
++	.features		= IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++	.protocol_version	= IP_SET_PROTOCOL_VERSION,
++	.create			= &create,
++	.destroy		= &destroy,
++	.flush			= &flush,
++	.reqsize		= sizeof(struct ip_set_req_nethash),
++	.addip			= &addip,
++	.addip_kernel		= &addip_kernel,
++	.retry			= &retry,
++	.delip			= &delip,
++	.delip_kernel		= &delip_kernel,
++	.testip			= &testip,
++	.testip_kernel		= &testip_kernel,
++	.header_size		= sizeof(struct ip_set_req_nethash_create),
++	.list_header		= &list_header,
++	.list_members_size	= &list_members_size,
++	.list_members		= &list_members,
++	.me			= THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("nethash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init init(void)
++{
++	return ip_set_register_set_type(&ip_set_nethash);
++}
++
++static void __exit fini(void)
++{
++	/* FIXME: possible race with ip_set_create() */
++	ip_set_unregister_set_type(&ip_set_nethash);
++}
++
++module_init(init);
++module_exit(fini);
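The retry() path above grows the hash by `resize` percent (and by at least one bucket) and restarts the whole rehash whenever re-inserting an element into the new table fails. A minimal user-space sketch of that growth rule follows; the function and variable names here are illustrative only and are not part of the patch.

#include <stdio.h>

/* Mirrors the hashsize growth used by the nethash retry() above:
 * grow by `resize` percent, but always by at least one bucket. */
static unsigned int grow_hashsize(unsigned int hashsize, unsigned int resize)
{
	unsigned int next = hashsize + (hashsize * resize) / 100;

	if (next == hashsize)
		next++;
	return next;
}

int main(void)
{
	unsigned int size = 1024;
	int attempt;

	/* With resize=50, each failed rehash grows the table by 50%. */
	for (attempt = 0; attempt < 4; attempt++) {
		printf("attempt %d: hashsize %u\n", attempt, size);
		size = grow_hashsize(size, 50);
	}
	return 0;
}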
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_portmap.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_portmap.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ip_set_portmap.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_portmap.c	2007-06-08 16:29:31.833808500 -0500
+@@ -0,0 +1,334 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module implementing a port set type as a bitmap */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_portmap.h>
++
++/* We must handle non-linear skbs */
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++	struct iphdr *iph = skb->nh.iph;
++	u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++
++	switch (iph->protocol) {
++	case IPPROTO_TCP: {
++		struct tcphdr tcph;
++		
++		/* See comments at tcp_match in ip_tables.c */
++		if (offset)
++			return INVALID_PORT;
++
++		if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
++			/* No choice either */
++			return INVALID_PORT;
++	     	
++	     	return ntohs(flags & IPSET_SRC ?
++			     tcph.source : tcph.dest);
++	    }
++	case IPPROTO_UDP: {
++		struct udphdr udph;
++
++		if (offset)
++			return INVALID_PORT;
++
++		if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
++			/* No choice either */
++			return INVALID_PORT;
++	     	
++	     	return ntohs(flags & IPSET_SRC ?
++			     udph.source : udph.dest);
++	    }
++	default:
++		return INVALID_PORT;
++	}
++}
++
++static inline int
++__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++{
++	struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++	if (port < map->first_port || port > map->last_port)
++		return -ERANGE;
++		
++	*hash_port = port;
++	DP("set: %s, port:%u, %u", set->name, port, *hash_port);
++	return !!test_bit(port - map->first_port, map->members);
++}
++
++static int
++testport(struct ip_set *set, const void *data, size_t size,
++         ip_set_ip_t *hash_port)
++{
++	struct ip_set_req_portmap *req = 
++	    (struct ip_set_req_portmap *) data;
++
++	if (size != sizeof(struct ip_set_req_portmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_portmap),
++			      size);
++		return -EINVAL;
++	}
++	return __testport(set, req->port, hash_port);
++}
++
++static int
++testport_kernel(struct ip_set *set, 
++	        const struct sk_buff *skb,
++	        ip_set_ip_t *hash_port,
++	        const u_int32_t *flags,
++	        unsigned char index)
++{
++	int res;
++	ip_set_ip_t port = get_port(skb, flags[index]);
++
++	DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);	
++	if (port == INVALID_PORT)
++		return 0;	
++
++	res =  __testport(set, port, hash_port);
++	
++	return (res < 0 ? 0 : res);
++}
++
++static inline int
++__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++{
++	struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++	if (port < map->first_port || port > map->last_port)
++		return -ERANGE;
++	if (test_and_set_bit(port - map->first_port, map->members))
++		return -EEXIST;
++		
++	*hash_port = port;
++	DP("port %u", port);
++	return 0;
++}
++
++static int
++addport(struct ip_set *set, const void *data, size_t size,
++        ip_set_ip_t *hash_port)
++{
++	struct ip_set_req_portmap *req = 
++	    (struct ip_set_req_portmap *) data;
++
++	if (size != sizeof(struct ip_set_req_portmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_portmap),
++			      size);
++		return -EINVAL;
++	}
++	return __addport(set, req->port, hash_port);
++}
++
++static int
++addport_kernel(struct ip_set *set, 
++	       const struct sk_buff *skb,
++	       ip_set_ip_t *hash_port,
++	       const u_int32_t *flags,
++	       unsigned char index)
++{
++	ip_set_ip_t port = get_port(skb, flags[index]);
++	
++	if (port == INVALID_PORT)
++		return -EINVAL;
++
++	return __addport(set, port, hash_port);
++}
++
++static inline int
++__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++{
++	struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++	if (port < map->first_port || port > map->last_port)
++		return -ERANGE;
++	if (!test_and_clear_bit(port - map->first_port, map->members))
++		return -EEXIST;
++		
++	*hash_port = port;
++	DP("port %u", port);
++	return 0;
++}
++
++static int
++delport(struct ip_set *set, const void *data, size_t size,
++        ip_set_ip_t *hash_port)
++{
++	struct ip_set_req_portmap *req =
++	    (struct ip_set_req_portmap *) data;
++
++	if (size != sizeof(struct ip_set_req_portmap)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			      sizeof(struct ip_set_req_portmap),
++			      size);
++		return -EINVAL;
++	}
++	return __delport(set, req->port, hash_port);
++}
++
++static int
++delport_kernel(struct ip_set *set, 
++	       const struct sk_buff *skb,
++	       ip_set_ip_t *hash_port,
++	       const u_int32_t *flags,
++	       unsigned char index)
++{
++	ip_set_ip_t port = get_port(skb, flags[index]);
++	
++	if (port == INVALID_PORT)
++		return -EINVAL;
++
++	return __delport(set, port, hash_port);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++	int newbytes;
++	struct ip_set_req_portmap_create *req =
++	    (struct ip_set_req_portmap_create *) data;
++	struct ip_set_portmap *map;
++
++	if (size != sizeof(struct ip_set_req_portmap_create)) {
++		ip_set_printk("data length wrong (want %zu, have %zu)",
++			       sizeof(struct ip_set_req_portmap_create),
++			       size);
++		return -EINVAL;
++	}
++
++	DP("from %u to %u", req->from, req->to);
++
++	if (req->from > req->to) {
++		DP("bad port range");
++		return -ENOEXEC;
++	}
++
++	if (req->to - req->from > MAX_RANGE) {
++		ip_set_printk("range too big (max %d ports)",
++			       MAX_RANGE+1);
++		return -ENOEXEC;
++	}
++
++	map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
++	if (!map) {
++		DP("out of memory for %d bytes",
++		   sizeof(struct ip_set_portmap));
++		return -ENOMEM;
++	}
++	map->first_port = req->from;
++	map->last_port = req->to;
++	newbytes = bitmap_bytes(req->from, req->to);
++	map->members = kmalloc(newbytes, GFP_KERNEL);
++	if (!map->members) {
++		DP("out of memory for %d bytes", newbytes);
++		kfree(map);
++		return -ENOMEM;
++	}
++	memset(map->members, 0, newbytes);
++
++	set->data = map;
++	return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++	struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++	kfree(map->members);
++	kfree(map);
++
++	set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++	struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++	memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++	struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++	struct ip_set_req_portmap_create *header =
++	    (struct ip_set_req_portmap_create *) data;
++
++	DP("list_header %u %u", map->first_port, map->last_port);
++
++	header->from = map->first_port;
++	header->to = map->last_port;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++	struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++	return bitmap_bytes(map->first_port, map->last_port);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++	struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++	int bytes = bitmap_bytes(map->first_port, map->last_port);
++
++	memcpy(data, map->members, bytes);
++}
++
++static struct ip_set_type ip_set_portmap = {
++	.typename		= SETTYPE_NAME,
++	.features		= IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
++	.protocol_version	= IP_SET_PROTOCOL_VERSION,
++	.create			= &create,
++	.destroy		= &destroy,
++	.flush			= &flush,
++	.reqsize		= sizeof(struct ip_set_req_portmap),
++	.addip			= &addport,
++	.addip_kernel		= &addport_kernel,
++	.delip			= &delport,
++	.delip_kernel		= &delport_kernel,
++	.testip			= &testport,
++	.testip_kernel		= &testport_kernel,
++	.header_size		= sizeof(struct ip_set_req_portmap_create),
++	.list_header		= &list_header,
++	.list_members_size	= &list_members_size,
++	.list_members		= &list_members,
++	.me			= THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("portmap type of IP sets");
++
++static int __init init(void)
++{
++	return ip_set_register_set_type(&ip_set_portmap);
++}
++
++static void __exit fini(void)
++{
++	/* FIXME: possible race with ip_set_create() */
++	ip_set_unregister_set_type(&ip_set_portmap);
++}
++
++module_init(init);
++module_exit(fini);
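The portmap type above stores one bit per port in the range first_port..last_port and indexes the bitmap with (port - first_port). A small user-space sketch of the same sizing and lookup arithmetic; bitmap_bytes(), set_port() and test_port() here are stand-ins written for illustration (assumed to round the range up to whole 32-bit words), not the kernel helpers themselves.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Stand-in for the kernel's bitmap_bytes() helper: bytes needed to hold
 * one bit per port in a..b, rounded up to whole 32-bit words. */
static size_t bitmap_bytes(uint16_t a, uint16_t b)
{
	return 4 * (((size_t)(b - a) + 32) / 32);
}

static void set_port(uint32_t *members, uint16_t first, uint16_t port)
{
	members[(port - first) / 32] |= 1u << ((port - first) % 32);
}

static int test_port(const uint32_t *members, uint16_t first, uint16_t port)
{
	return !!(members[(port - first) / 32] & (1u << ((port - first) % 32)));
}

int main(void)
{
	uint16_t first = 1024, last = 2047;
	uint32_t *members = calloc(1, bitmap_bytes(first, last));

	if (!members)
		return 1;
	set_port(members, first, 1080);
	printf("1080 in set: %d, 1081 in set: %d\n",
	       test_port(members, first, 1080), test_port(members, first, 1081));
	free(members);
	return 0;
}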
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ipt_set.c linux-2.6.21.1.new/net/ipv4/netfilter/ipt_set.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ipt_set.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ipt_set.c	2007-06-08 16:29:31.833808500 -0500
+@@ -0,0 +1,150 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ *                         Patrick Schaaf <bof@bof.de>
++ *                         Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* Kernel module to match an IP set. */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ipt_set.h>
++
++static inline int
++match_set(const struct ipt_set_info *info,
++	  const struct sk_buff *skb,
++	  int inv)
++{	
++	if (ip_set_testip_kernel(info->index, skb, info->flags))
++		inv = !inv;
++	return inv;
++}
++
++static int
++match(const struct sk_buff *skb,
++      const struct net_device *in,
++      const struct net_device *out,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++      const struct xt_match *match,
++#endif
++      const void *matchinfo,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++      int offset, unsigned int protoff, int *hotdrop)
++#else
++      int offset, int *hotdrop)
++#endif
++{
++	const struct ipt_set_info_match *info = matchinfo;
++		
++	return match_set(&info->match_set,
++			 skb,
++			 info->match_set.flags[0] & IPSET_MATCH_INV);
++}
++
++static int
++checkentry(const char *tablename,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	   const void *inf,
++#else
++	   const struct ipt_ip *ip,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++	   const struct xt_match *match,
++#endif
++	   void *matchinfo,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++	   unsigned int matchsize,
++#endif
++	   unsigned int hook_mask)
++{
++	struct ipt_set_info_match *info = 
++		(struct ipt_set_info_match *) matchinfo;
++	ip_set_id_t index;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++	if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
++		ip_set_printk("invalid matchsize %d", matchsize);
++		return 0;
++	}
++#endif
++
++	index = ip_set_get_byindex(info->match_set.index);
++		
++	if (index == IP_SET_INVALID_ID) {
++		ip_set_printk("Cannot find set identified by id %u to match",
++			      info->match_set.index);
++		return 0;	/* error */
++	}
++	if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
++		ip_set_printk("That's nasty!");
++		return 0;	/* error */
++	}
++
++	return 1;
++}
++
++static void destroy(
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++		    const struct xt_match *match,
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++		    void *matchinfo, unsigned int matchsize)
++#else
++		    void *matchinfo)
++#endif
++{
++	struct ipt_set_info_match *info = matchinfo;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++	if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
++		ip_set_printk("invalid matchsize %d", matchsize);
++		return;
++	}
++#endif
++	ip_set_put(info->match_set.index);
++}
++
++static struct ipt_match set_match = {
++	.name		= "set",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++	.family		= AF_INET,
++#endif
++	.match		= &match,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++	.matchsize	= sizeof(struct ipt_set_info_match),
++#endif
++	.checkentry	= &checkentry,
++	.destroy	= &destroy,
++	.me		= THIS_MODULE
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iptables IP set match module");
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++#define ipt_register_match	xt_register_match
++#define ipt_unregister_match	xt_unregister_match
++#endif
++
++static int __init ipt_ipset_init(void)
++{
++	return ipt_register_match(&set_match);
++}
++
++static void __exit ipt_ipset_fini(void)
++{
++	ipt_unregister_match(&set_match);
++}
++
++module_init(ipt_ipset_init);
++module_exit(ipt_ipset_fini);
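match_set() above uses the common netfilter inversion idiom: start from the inverted flag and flip it when the set lookup succeeds, so one code path serves both normal and inverted rules. A standalone sketch of that truth table (names and values are illustrative):

#include <stdio.h>

/* Same idiom as match_set() above: returns 1 when the rule matches. */
static int match_set(int in_set, int inv)
{
	if (in_set)
		inv = !inv;
	return inv;
}

int main(void)
{
	printf("in set, normal rule:   %d\n", match_set(1, 0)); /* 1 */
	printf("not in set, normal:    %d\n", match_set(0, 0)); /* 0 */
	printf("in set, inverted:      %d\n", match_set(1, 1)); /* 0 */
	printf("not in set, inverted:  %d\n", match_set(0, 1)); /* 1 */
	return 0;
}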
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ipt_SET.c linux-2.6.21.1.new/net/ipv4/netfilter/ipt_SET.c
+--- linux-2.6.21.1/net/ipv4/netfilter/ipt_SET.c	1969-12-31 18:00:00.000000000 -0600
++++ linux-2.6.21.1.new/net/ipv4/netfilter/ipt_SET.c	2007-06-08 16:29:31.833808500 -0500
+@@ -0,0 +1,168 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ *                         Patrick Schaaf <bof@bof.de>
++ *                         Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.  
++ */
++
++/* ipt_SET.c - netfilter target to manipulate IP sets */
++
++#include <linux/types.h>
++#include <linux/ip.h>
++#include <linux/timer.h>
++#include <linux/module.h>
++#include <linux/netfilter.h>
++#include <linux/netdevice.h>
++#include <linux/if.h>
++#include <linux/inetdevice.h>
++#include <linux/version.h>
++#include <net/protocol.h>
++#include <net/checksum.h>
++#include <linux/netfilter_ipv4.h>
++#include <linux/netfilter_ipv4/ip_nat_rule.h>
++#include <linux/netfilter_ipv4/ipt_set.h>
++
++static unsigned int
++target(struct sk_buff **pskb,
++       const struct net_device *in,
++       const struct net_device *out,
++       unsigned int hooknum,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++       const struct xt_target *target,
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++       const void *targinfo,
++       void *userinfo)
++#else
++       const void *targinfo)
++#endif
++{
++	const struct ipt_set_info_target *info = targinfo;
++	
++	if (info->add_set.index != IP_SET_INVALID_ID)
++		ip_set_addip_kernel(info->add_set.index,
++				    *pskb,
++				    info->add_set.flags);
++	if (info->del_set.index != IP_SET_INVALID_ID)
++		ip_set_delip_kernel(info->del_set.index,
++				    *pskb,
++				    info->del_set.flags);
++
++	return IPT_CONTINUE;
++}
++
++static int
++checkentry(const char *tablename,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	   const void *e,
++#else
++	   const struct ipt_entry *e,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++	   const struct xt_target *target,
++#endif
++	   void *targinfo,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++	   unsigned int targinfosize, 
++#endif
++	   unsigned int hook_mask)
++{
++	struct ipt_set_info_target *info = 
++		(struct ipt_set_info_target *) targinfo;
++	ip_set_id_t index;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++	if (targinfosize != IPT_ALIGN(sizeof(*info))) {
++		DP("bad target info size %u", targinfosize);
++		return 0;
++	}
++#endif
++
++	if (info->add_set.index != IP_SET_INVALID_ID) {
++		index = ip_set_get_byindex(info->add_set.index);
++		if (index == IP_SET_INVALID_ID) {
++			ip_set_printk("cannot find add_set index %u as target",
++				      info->add_set.index);
++			return 0;	/* error */
++		}
++	}
++
++	if (info->del_set.index != IP_SET_INVALID_ID) {
++		index = ip_set_get_byindex(info->del_set.index);
++		if (index == IP_SET_INVALID_ID) {
++			ip_set_printk("cannot find del_set index %u as target",
++				      info->del_set.index);
++			return 0;	/* error */
++		}
++	}
++	if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
++	    || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
++		ip_set_printk("That's nasty!");
++		return 0;	/* error */
++	}
++
++	return 1;
++}
++
++static void destroy(
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++		    const struct xt_target *target,
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++		    void *targetinfo, unsigned int targetsize)
++#else
++		    void *targetinfo)
++#endif
++{
++	struct ipt_set_info_target *info = targetinfo;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++	if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
++		ip_set_printk("invalid targetsize %d", targetsize);
++		return;
++	}
++#endif
++	if (info->add_set.index != IP_SET_INVALID_ID)
++		ip_set_put(info->add_set.index);
++	if (info->del_set.index != IP_SET_INVALID_ID)
++		ip_set_put(info->del_set.index);
++}
++
++static struct ipt_target SET_target = {
++	.name 		= "SET",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++	.family		= AF_INET,
++#endif
++	.target 	= target,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++	.targetsize	= sizeof(struct ipt_set_info_target),
++#endif
++	.checkentry 	= checkentry,
++	.destroy 	= destroy,
++	.me 		= THIS_MODULE
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iptables IP set target module");
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++#define ipt_register_target      xt_register_target
++#define ipt_unregister_target    xt_unregister_target
++#endif
++
++static int __init ipt_SET_init(void)
++{
++	return ipt_register_target(&SET_target);
++}
++
++static void __exit ipt_SET_fini(void)
++{
++	ipt_unregister_target(&SET_target);
++}
++
++module_init(ipt_SET_init);
++module_exit(ipt_SET_fini);
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/Kconfig linux-2.6.21.1.new/net/ipv4/netfilter/Kconfig
+--- linux-2.6.21.1/net/ipv4/netfilter/Kconfig	2007-04-27 16:49:26.000000000 -0500
++++ linux-2.6.21.1.new/net/ipv4/netfilter/Kconfig	2007-06-08 16:29:31.833808500 -0500
+@@ -657,5 +657,114 @@
+ 	  Allows altering the ARP packet payload: source and destination
+ 	  hardware and network addresses.
+ 
++config IP_NF_SET
++	tristate "IP set support"
++	depends on INET && NETFILTER
++	help
++	  This option adds IP set support to the kernel.
++	  In order to define and use sets, you need the userspace utility
++	  ipset(8).
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
++config IP_NF_SET_MAX
++	int "Maximum number of IP sets"
++	default 256
++	range 2 65534
++	depends on IP_NF_SET
++	help
++	  Here you can define the default value for the maximum
++	  number of IP sets in the kernel.
++
++	  The value can be overridden by the 'max_sets' module
++	  parameter of the 'ip_set' module.
++
++config IP_NF_SET_HASHSIZE
++	int "Hash size for bindings of IP sets"
++	default 1024
++	depends on IP_NF_SET
++	help
++	  Here you can define the default value of the hash size
++	  used for bindings of IP sets.
++
++	  The value can be overridden by the 'hash_size' module
++	  parameter of the 'ip_set' module.
++
++config IP_NF_SET_IPMAP
++	tristate "ipmap set support"
++	depends on IP_NF_SET
++	help
++	  This option adds the ipmap set type support.
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
++config IP_NF_SET_MACIPMAP
++	tristate "macipmap set support"
++	depends on IP_NF_SET
++	help
++	  This option adds the macipmap set type support.
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
++config IP_NF_SET_PORTMAP
++	tristate "portmap set support"
++	depends on IP_NF_SET
++	help
++	  This option adds the portmap set type support.
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
++config IP_NF_SET_IPHASH
++	tristate "iphash set support"
++	depends on IP_NF_SET
++	help
++	  This option adds the iphash set type support.
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
++config IP_NF_SET_NETHASH
++	tristate "nethash set support"
++	depends on IP_NF_SET
++	help
++	  This option adds the nethash set type support.
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
++config IP_NF_SET_IPPORTHASH
++	tristate "ipporthash set support"
++	depends on IP_NF_SET
++	help
++	  This option adds the ipporthash set type support.
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
++config IP_NF_SET_IPTREE
++	tristate "iptree set support"
++	depends on IP_NF_SET
++	help
++	  This option adds the iptree set type support.
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
++config IP_NF_MATCH_SET
++	tristate "set match support"
++	depends on IP_NF_SET
++	help
++	  Set matching matches packets against the given IP sets.
++	  You need the ipset utility to create and set up the sets.
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
++config IP_NF_TARGET_SET
++	tristate "SET target support"
++	depends on IP_NF_SET
++	help
++	  The SET target makes it possible to add/delete entries
++	  in IP sets.
++	  You need the ipset utility to create and set up the sets.
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
++
+ endmenu
+ 
+diff -ruN linux-2.6.21.1/net/ipv4/netfilter/Makefile linux-2.6.21.1.new/net/ipv4/netfilter/Makefile
+--- linux-2.6.21.1/net/ipv4/netfilter/Makefile	2007-04-27 16:49:26.000000000 -0500
++++ linux-2.6.21.1.new/net/ipv4/netfilter/Makefile	2007-06-08 16:29:31.837808750 -0500
+@@ -90,6 +90,7 @@
+ obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
+ obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
+ obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
++obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
+ obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
+ 
+ # targets
+@@ -105,6 +106,17 @@
+ obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
+ obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
+ obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
++obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
++
++# sets
++obj-$(CONFIG_IP_NF_SET) += ip_set.o
++obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
++obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
++obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
++obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
++obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
++obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
+ 
+ # generic ARP tables
+ obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
diff --git a/target/linux/generic-2.6/patches-2.6.22/140-netfilter_time.patch b/target/linux/generic-2.6/patches-2.6.22/140-netfilter_time.patch
new file mode 100644
index 0000000000..086eb97cd3
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/140-netfilter_time.patch
@@ -0,0 +1,243 @@
+diff -urN linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_time.h linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_time.h
+--- linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_time.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_time.h	2007-05-26 20:31:03.596402160 +0200
+@@ -0,0 +1,18 @@
++#ifndef __ipt_time_h_included__
++#define __ipt_time_h_included__
++
++
++struct ipt_time_info {
++	u_int8_t  days_match;   /* 1 bit per day. -SMTWTFS                      */
++	u_int16_t time_start;   /* 0 < time_start < 23*60+59 = 1439             */
++	u_int16_t time_stop;    /* 0:0 < time_stop < 23:59                      */
++
++				/* FIXME: Keep this one for userspace iptables binary compatibility: */
++	u_int8_t  kerneltime;   /* ignore skb time (and use kerneltime) or not. */
++
++	time_t    date_start;
++	time_t    date_stop;
++};
++
++
++#endif /* __ipt_time_h_included__ */
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/ipt_time.c linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_time.c
+--- linux-2.6.21.1.old/net/ipv4/netfilter/ipt_time.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_time.c	2007-05-26 20:31:03.596402160 +0200
+@@ -0,0 +1,180 @@
++/*
++  This is a module which is used for time matching
++  It uses some modified code from dietlibc (the localtime() function)
++  that you can find at http://www.fefe.de/dietlibc/
++  This file is distributed under the terms of the GNU General Public
++  License (GPL). Copies of the GPL can be obtained from: ftp://prep.ai.mit.edu/pub/gnu/GPL
++  2001-05-04 Fabrice MARIE <fabrice@netfilter.org> : initial development.
++  2001-21-05 Fabrice MARIE <fabrice@netfilter.org> : bug fix in the match code,
++     thanks to "Zeng Yu" <zengy@capitel.com.cn> for bug report.
++  2001-26-09 Fabrice MARIE <fabrice@netfilter.org> : force the match to be in LOCAL_IN or PRE_ROUTING only.
++  2001-30-11 Fabrice : added the possibility to use the match in FORWARD/OUTPUT with a little hack,
++     added Nguyen Dang Phuoc Dong <dongnd@tlnet.com.vn> patch to support timezones.
++  2004-05-02 Fabrice : added support for date matching, from an idea of Fabien COELHO.
++*/
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_time.h>
++#include <linux/time.h>
++
++MODULE_AUTHOR("Fabrice MARIE <fabrice@netfilter.org>");
++MODULE_DESCRIPTION("Match arrival timestamp/date");
++MODULE_LICENSE("GPL");
++
++struct tm
++{
++	int tm_sec;                   /* Seconds.     [0-60] (1 leap second) */
++	int tm_min;                   /* Minutes.     [0-59] */
++	int tm_hour;                  /* Hours.       [0-23] */
++	int tm_mday;                  /* Day.         [1-31] */
++	int tm_mon;                   /* Month.       [0-11] */
++	int tm_year;                  /* Year - 1900.  */
++	int tm_wday;                  /* Day of week. [0-6] */
++	int tm_yday;                  /* Days in year.[0-365] */
++	int tm_isdst;                 /* DST.         [-1/0/1]*/
++
++	long int tm_gmtoff;           /* we don't care, we count from GMT */
++	const char *tm_zone;          /* we don't care, we count from GMT */
++};
++
++void
++localtime(const u32 time, struct tm *r);
++
++static int
++match(const struct sk_buff *skb,
++      const struct net_device *in,
++      const struct net_device *out,
++      const struct xt_match *match,
++      const void *matchinfo,
++      int offset,
++      unsigned int protoff,
++      int *hotdrop)
++{
++	const struct ipt_time_info *info = matchinfo;   /* match info for rule */
++	struct timeval tv;
++	struct tm currenttime;                          /* time human readable */
++	u_int8_t days_of_week[7] = {64, 32, 16, 8, 4, 2, 1};
++	u_int16_t packet_time;
++
++	/* We might not have a timestamp, get one */
++	if (skb->tstamp.tv64 == 0)
++		__net_timestamp((struct sk_buff *)skb);
++
++	skb_get_timestamp(skb, &tv);
++	/* First we make sure we are in the date start-stop boundaries */
++	if ((tv.tv_sec < info->date_start) || (tv.tv_sec > info->date_stop))
++		return 0; /* We are outside the date boundaries */
++
++	/* Transform the timestamp of the packet, in a human readable form */
++	localtime(tv.tv_sec, &currenttime);
++
++	/* check if we match this timestamp, we start by the days... */
++	if ((days_of_week[currenttime.tm_wday] & info->days_match) != days_of_week[currenttime.tm_wday])
++		return 0; /* the day doesn't match */
++
++	/* ... check the time now */
++	packet_time = (currenttime.tm_hour * 60) + currenttime.tm_min;
++	if ((packet_time < info->time_start) || (packet_time > info->time_stop))
++		return 0;
++
++	/* here we match ! */
++	return 1;
++}
++
++static int
++checkentry(const char *tablename,
++           const void *ip,
++	   const struct xt_match *match,
++           void *matchinfo,
++           unsigned int hook_mask)
++{
++	struct ipt_time_info *info = matchinfo;   /* match info for rule */
++
++	/* First, check that we are in the correct hooks */
++	if (hook_mask
++            & ~((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_IN) | (1 << NF_IP_FORWARD) | (1 << NF_IP_LOCAL_OUT)))
++	{
++		printk("ipt_time: error, only valid for PRE_ROUTING, LOCAL_IN, FORWARD and OUTPUT\n");
++		return 0;
++	}
++
++	/* Now check the coherence of the data ... */
++	if ((info->time_start > 1439) ||        /* 23*60+59 = 1439*/
++	    (info->time_stop  > 1439))
++	{
++		printk(KERN_WARNING "ipt_time: invalid argument\n");
++		return 0;
++	}
++
++	return 1;
++}
++
++static struct ipt_match time_match = {
++	.name = "time",
++	.match = &match,
++	.matchsize = sizeof(struct ipt_time_info),
++	.checkentry = &checkentry,
++	.me = THIS_MODULE
++};
++
++static int __init init(void)
++{
++	printk("ipt_time loading\n");
++	return xt_register_match(&time_match);
++}
++
++static void __exit fini(void)
++{
++	xt_unregister_match(&time_match);
++	printk("ipt_time unloaded\n");
++}
++
++module_init(init);
++module_exit(fini);
++
++
++/* The part below is borrowed and modified from dietlibc */
++
++/* seconds per day */
++#define SPD 24*60*60
++
++void
++localtime(const u32 time, struct tm *r) {
++	u32 i, timep;
++	extern struct timezone sys_tz;
++	const unsigned int __spm[12] =
++		{ 0,
++		  (31),
++		  (31+28),
++		  (31+28+31),
++		  (31+28+31+30),
++		  (31+28+31+30+31),
++		  (31+28+31+30+31+30),
++		  (31+28+31+30+31+30+31),
++		  (31+28+31+30+31+30+31+31),
++		  (31+28+31+30+31+30+31+31+30),
++		  (31+28+31+30+31+30+31+31+30+31),
++		  (31+28+31+30+31+30+31+31+30+31+30),
++		};
++	register u32 work;
++
++	timep = time - (sys_tz.tz_minuteswest * 60);
++	work=timep%(SPD);
++	r->tm_sec=work%60; work/=60;
++	r->tm_min=work%60; r->tm_hour=work/60;
++	work=timep/(SPD);
++	r->tm_wday=(4+work)%7;
++	for (i=1970; ; ++i) {
++		register time_t k= (!(i%4) && ((i%100) || !(i%400)))?366:365;
++		if (work>k)
++			work-=k;
++		else
++			break;
++	}
++	r->tm_year=i-1900;
++	for (i=11; i && __spm[i]>work; --i) ;
++	r->tm_mon=i;
++	r->tm_mday=work-__spm[i]+1;
++}
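localtime() above is a trimmed-down dietlibc conversion: after the timezone adjustment it splits the epoch value into seconds-of-day and whole days, and derives the weekday from the fact that 1970-01-01 (day 0) was a Thursday, hence the (4 + work) % 7. A small user-space check of that arithmetic for a single timestamp, ignoring the timezone and the month/year steps:

#include <stdio.h>

#define SPD (24 * 60 * 60)	/* seconds per day, as in the match above */

int main(void)
{
	unsigned int t = 1181320171u;	/* 2007-06-08 16:29:31 UTC, a Friday */
	unsigned int secs = t % SPD;
	unsigned int days = t / SPD;

	/* Day 0 (1970-01-01) was a Thursday, so weekday = (4 + days) % 7,
	 * with 0 = Sunday as in struct tm; this prints "16:29:31 weekday 5". */
	printf("%02u:%02u:%02u weekday %u\n",
	       secs / 3600, (secs / 60) % 60, secs % 60, (4 + days) % 7);
	return 0;
}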
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/Kconfig linux-2.6.21.1.dev/net/ipv4/netfilter/Kconfig
+--- linux-2.6.21.1.old/net/ipv4/netfilter/Kconfig	2007-05-26 20:31:03.452424048 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/Kconfig	2007-05-26 20:31:03.597402008 +0200
+@@ -96,6 +96,22 @@
+ 
+ 	  To compile it as a module, choose M here.  If unsure, say N.
+ 
++
++config IP_NF_MATCH_TIME
++	tristate  'TIME match support'
++	depends on IP_NF_IPTABLES
++	help
++	  This option adds a `time' match, which allows you
++	  to match based on the packet arrival time/date
++	  (arrival time/date at the machine which netfilter is running on) or
++	  departure time/date (for locally generated packets).
++
++	  If you say Y here, try iptables -m time --help for more information.
++	  If you want to compile it as a module, say M here and read
++	  Documentation/modules.txt.  If unsure, say `N'.
++
++
+ config IP_NF_MATCH_RECENT
+ 	tristate "recent match support"
+ 	depends on IP_NF_IPTABLES
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/Makefile linux-2.6.21.1.dev/net/ipv4/netfilter/Makefile
+--- linux-2.6.21.1.old/net/ipv4/netfilter/Makefile	2007-05-26 20:31:03.452424048 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/Makefile	2007-05-26 20:31:03.597402008 +0200
+@@ -44,6 +44,7 @@
+ obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
+ obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
+ obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
++obj-$(CONFIG_IP_NF_MATCH_TIME) += ipt_time.o
+ obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
+ obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
+ obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
diff --git a/target/linux/generic-2.6/patches-2.6.22/150-netfilter_imq.patch b/target/linux/generic-2.6/patches-2.6.22/150-netfilter_imq.patch
new file mode 100644
index 0000000000..36a4cadd7e
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/150-netfilter_imq.patch
@@ -0,0 +1,867 @@
+diff -urN linux-2.6.21.1.old/drivers/net/imq.c linux-2.6.21.1.dev/drivers/net/imq.c
+--- linux-2.6.21.1.old/drivers/net/imq.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/drivers/net/imq.c	2007-05-26 20:34:15.180276984 +0200
+@@ -0,0 +1,402 @@
++/*
++ *             Pseudo-driver for the intermediate queue device.
++ *
++ *             This program is free software; you can redistribute it and/or
++ *             modify it under the terms of the GNU General Public License
++ *             as published by the Free Software Foundation; either version
++ *             2 of the License, or (at your option) any later version.
++ *
++ * Authors:    Patrick McHardy, <kaber@trash.net>
++ *
++ *            The first version was written by Martin Devera, <devik@cdi.cz>
++ *
++ * Credits:    Jan Rafaj <imq2t@cedric.vabo.cz>
++ *              - Update patch to 2.4.21
++ *             Sebastian Strollo <sstrollo@nortelnetworks.com>
++ *              - Fix "Dead-loop on netdevice imq"-issue
++ *             Marcel Sebek <sebek64@post.cz>
++ *              - Update to 2.6.2-rc1
++ *
++ *	       After some time of inactivity there is a group taking care
++ *	       of IMQ again: http://www.linuximq.net
++ *
++ *
++ *	       2004/06/30 - New version of IMQ patch to kernels <=2.6.7 including
++ *	       the following changes:
++ *
++ *	       - Correction of ipv6 support "+"s issue (Hasso Tepper)
++ *	       - Correction of imq_init_devs() issue that resulted in
++ *	       kernel OOPS unloading IMQ as module (Norbert Buchmuller)
++ *	       - Addition of functionality to choose number of IMQ devices
++ *	       during kernel config (Andre Correa)
++ *	       - Addition of functionality to choose how IMQ hooks on
++ *	       PRE and POSTROUTING (after or before NAT) (Andre Correa)
++ *	       - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
++ *
++ *
++ *             2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
++ *             released with almost no problems. 2.6.14-x was released
++ *             with some important changes: nfcache was removed; After
++ *             some weeks of trouble we figured out that some IMQ fields
++ *             in skb were missing in skbuff.c - skb_clone and copy_skb_header.
++ *             These functions are correctly patched by this new patch version.
++ *
++ *             Thanks for all who helped to figure out all the problems with
++ *             2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
++ *             Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
++ *             I didn't forget anybody). I apologize again for my lack of time.
++ *
++ *             More info at: http://www.linuximq.net/ (Andre Correa)
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/moduleparam.h>
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_arp.h>
++#include <linux/netfilter.h>
++#include <linux/netfilter_ipv4.h>
++#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++	#include <linux/netfilter_ipv6.h>
++#endif
++#include <linux/imq.h>
++#include <net/pkt_sched.h>
++
++extern int qdisc_restart1(struct net_device *dev);
++
++static nf_hookfn imq_nf_hook;
++
++static struct nf_hook_ops imq_ingress_ipv4 = {
++	.hook		= imq_nf_hook,
++	.owner		= THIS_MODULE,
++	.pf		= PF_INET,
++	.hooknum	= NF_IP_PRE_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++	.priority	= NF_IP_PRI_MANGLE + 1
++#else
++	.priority	= NF_IP_PRI_NAT_DST + 1
++#endif
++};
++
++static struct nf_hook_ops imq_egress_ipv4 = {
++	.hook		= imq_nf_hook,
++	.owner		= THIS_MODULE,
++	.pf		= PF_INET,
++	.hooknum	= NF_IP_POST_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
++	.priority	= NF_IP_PRI_LAST
++#else
++	.priority	= NF_IP_PRI_NAT_SRC - 1
++#endif
++};
++
++#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++static struct nf_hook_ops imq_ingress_ipv6 = {
++	.hook		= imq_nf_hook,
++	.owner		= THIS_MODULE,
++	.pf		= PF_INET6,
++	.hooknum	= NF_IP6_PRE_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++	.priority	= NF_IP6_PRI_MANGLE + 1
++#else
++	.priority	= NF_IP6_PRI_NAT_DST + 1
++#endif
++};
++
++static struct nf_hook_ops imq_egress_ipv6 = {
++	.hook		= imq_nf_hook,
++	.owner		= THIS_MODULE,
++	.pf		= PF_INET6,
++	.hooknum	= NF_IP6_POST_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
++	.priority	= NF_IP6_PRI_LAST
++#else
++	.priority	= NF_IP6_PRI_NAT_SRC - 1
++#endif
++};
++#endif
++
++#if defined(CONFIG_IMQ_NUM_DEVS)
++static unsigned int numdevs = CONFIG_IMQ_NUM_DEVS;
++#else
++static unsigned int numdevs = 2;
++#endif
++
++static struct net_device *imq_devs;
++
++static struct net_device_stats *imq_get_stats(struct net_device *dev)
++{
++	return (struct net_device_stats *)dev->priv;
++}
++
++/* called for packets kfree'd in qdiscs at places other than enqueue */
++static void imq_skb_destructor(struct sk_buff *skb)
++{
++	struct nf_info *info = skb->nf_info;
++
++	if (info) {
++		if (info->indev)
++			dev_put(info->indev);
++		if (info->outdev)
++			dev_put(info->outdev);
++		kfree(info);
++	}
++}
++
++static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++	struct net_device_stats *stats = (struct net_device_stats*) dev->priv;
++
++	stats->tx_bytes += skb->len;
++	stats->tx_packets++;
++
++	skb->imq_flags = 0;
++	skb->destructor = NULL;
++
++	dev->trans_start = jiffies;
++	nf_reinject(skb, skb->nf_info, NF_ACCEPT);
++	return 0;
++}
++
++static int imq_nf_queue(struct sk_buff *skb, struct nf_info *info, unsigned queue_num, void *data)
++{
++	struct net_device *dev;
++	struct net_device_stats *stats;
++	struct sk_buff *skb2 = NULL;
++	struct Qdisc *q;
++	unsigned int index = skb->imq_flags&IMQ_F_IFMASK;
++	int ret = -1;
++
++	if (index > numdevs)
++		return -1;
++
++	dev = imq_devs + index;
++	if (!(dev->flags & IFF_UP)) {
++		skb->imq_flags = 0;
++		nf_reinject(skb, info, NF_ACCEPT);
++		return 0;
++	}
++	dev->last_rx = jiffies;
++
++	if (skb->destructor) {
++		skb2 = skb;
++		skb = skb_clone(skb, GFP_ATOMIC);
++		if (!skb)
++			return -1;
++	}
++	skb->nf_info = info;
++
++	stats = (struct net_device_stats *)dev->priv;
++	stats->rx_bytes+= skb->len;
++	stats->rx_packets++;
++
++	spin_lock_bh(&dev->queue_lock);
++	q = dev->qdisc;
++	if (q->enqueue) {
++		q->enqueue(skb_get(skb), q);
++		if (skb_shared(skb)) {
++			skb->destructor = imq_skb_destructor;
++			kfree_skb(skb);
++			ret = 0;
++		}
++	}
++	if (spin_is_locked(&dev->_xmit_lock))
++		netif_schedule(dev);
++	else
++		while (!netif_queue_stopped(dev) && qdisc_restart1(dev) < 0)
++			/* NOTHING */;
++
++	spin_unlock_bh(&dev->queue_lock);
++
++	if (skb2)
++		kfree_skb(ret ? skb : skb2);
++
++	return ret;
++}
++
++static struct nf_queue_handler nfqh = {
++	.name  = "imq",
++	.outfn = imq_nf_queue,
++};
++
++static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff **pskb,
++				const struct net_device *indev,
++				const struct net_device *outdev,
++				int (*okfn)(struct sk_buff *))
++{
++	if ((*pskb)->imq_flags & IMQ_F_ENQUEUE)
++		return NF_QUEUE;
++
++	return NF_ACCEPT;
++}
++
++
++static int __init imq_init_hooks(void)
++{
++	int err;
++
++	err = nf_register_queue_handler(PF_INET, &nfqh);
++	if (err > 0)
++		goto err1;
++	if ((err = nf_register_hook(&imq_ingress_ipv4)))
++		goto err2;
++	if ((err = nf_register_hook(&imq_egress_ipv4)))
++		goto err3;
++#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++	if ((err = nf_register_queue_handler(PF_INET6, &nfqh)))
++		goto err4;
++	if ((err = nf_register_hook(&imq_ingress_ipv6)))
++		goto err5;
++	if ((err = nf_register_hook(&imq_egress_ipv6)))
++		goto err6;
++#endif
++
++	return 0;
++
++#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++err6:
++	nf_unregister_hook(&imq_ingress_ipv6);
++err5:
++	nf_unregister_queue_handler(PF_INET6);
++err4:
++	nf_unregister_hook(&imq_egress_ipv6);
++#endif
++err3:
++	nf_unregister_hook(&imq_ingress_ipv4);
++err2:
++	nf_unregister_queue_handler(PF_INET);
++err1:
++	return err;
++}
++
++static void __exit imq_unhook(void)
++{
++	nf_unregister_hook(&imq_ingress_ipv4);
++	nf_unregister_hook(&imq_egress_ipv4);
++	nf_unregister_queue_handler(PF_INET);
++#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++	nf_unregister_hook(&imq_ingress_ipv6);
++	nf_unregister_hook(&imq_egress_ipv6);
++	nf_unregister_queue_handler(PF_INET6);
++#endif
++}
++
++static int __init imq_dev_init(struct net_device *dev)
++{
++	dev->hard_start_xmit    = imq_dev_xmit;
++	dev->type               = ARPHRD_VOID;
++	dev->mtu                = 1500;
++	dev->tx_queue_len       = 30;
++	dev->flags              = IFF_NOARP;
++	dev->priv = kmalloc(sizeof(struct net_device_stats), GFP_KERNEL);
++	if (dev->priv == NULL)
++		return -ENOMEM;
++	memset(dev->priv, 0, sizeof(struct net_device_stats));
++	dev->get_stats          = imq_get_stats;
++
++	return 0;
++}
++
++static void imq_dev_uninit(struct net_device *dev)
++{
++	kfree(dev->priv);
++}
++
++static int __init imq_init_devs(void)
++{
++	struct net_device *dev;
++	int i,j;
++	j = numdevs;
++
++	if (!numdevs || numdevs > IMQ_MAX_DEVS) {
++		printk(KERN_ERR "IMQ: numdevs has to be between 1 and %u\n",
++		       IMQ_MAX_DEVS);
++		return -EINVAL;
++	}
++
++	imq_devs = kmalloc(sizeof(struct net_device) * numdevs, GFP_KERNEL);
++	if (!imq_devs)
++		return -ENOMEM;
++	memset(imq_devs, 0, sizeof(struct net_device) * numdevs);
++
++	/* we start counting at zero */
++	numdevs--;
++
++	for (i = 0, dev = imq_devs; i <= numdevs; i++, dev++) {
++		SET_MODULE_OWNER(dev);
++		strcpy(dev->name, "imq%d");
++		dev->init   = imq_dev_init;
++		dev->uninit = imq_dev_uninit;
++
++		if (register_netdev(dev) < 0)
++			goto err_register;
++	}
++	printk(KERN_INFO "IMQ starting with %u devices...\n", j);
++	return 0;
++
++err_register:
++	for (; i; i--)
++		unregister_netdev(--dev);
++	kfree(imq_devs);
++	return -EIO;
++}
++
++static void imq_cleanup_devs(void)
++{
++	int i;
++	struct net_device *dev = imq_devs;
++
++	for (i = 0; i <= numdevs; i++)
++		unregister_netdev(dev++);
++
++	kfree(imq_devs);
++}
++
++static int __init imq_init_module(void)
++{
++	int err;
++
++	if ((err = imq_init_devs())) {
++		printk(KERN_ERR "IMQ: Error trying imq_init_devs()\n");
++		return err;
++	}
++	if ((err = imq_init_hooks())) {
++		printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
++		imq_cleanup_devs();
++		return err;
++	}
++
++	printk(KERN_INFO "IMQ driver loaded successfully.\n");
++
++#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++	printk(KERN_INFO "\tHooking IMQ before NAT on PREROUTING.\n");
++#else
++	printk(KERN_INFO "\tHooking IMQ after NAT on PREROUTING.\n");
++#endif
++#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++	printk(KERN_INFO "\tHooking IMQ before NAT on POSTROUTING.\n");
++#else
++	printk(KERN_INFO "\tHooking IMQ after NAT on POSTROUTING.\n");
++#endif
++
++	return 0;
++}
++
++static void __exit imq_cleanup_module(void)
++{
++	imq_unhook();
++	imq_cleanup_devs();
++	printk(KERN_INFO "IMQ driver unloaded successfully.\n");
++}
++
++
++module_init(imq_init_module);
++module_exit(imq_cleanup_module);
++
++module_param(numdevs, int, 0);
++MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
++MODULE_AUTHOR("http://www.linuximq.net");
++MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
++MODULE_LICENSE("GPL");
+diff -urN linux-2.6.21.1.old/drivers/net/Kconfig linux-2.6.21.1.dev/drivers/net/Kconfig
+--- linux-2.6.21.1.old/drivers/net/Kconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/drivers/net/Kconfig	2007-05-26 20:34:15.193275008 +0200
+@@ -96,6 +96,129 @@
+ 	  To compile this driver as a module, choose M here: the module
+ 	  will be called eql.  If unsure, say N.
+ 
++config IMQ
++	tristate "IMQ (intermediate queueing device) support"
++	depends on NETDEVICES && NETFILTER
++	---help---
++	  The IMQ device(s) is used as a placeholder for QoS queueing
++	  disciplines. Every packet entering/leaving the IP stack can be
++	  directed through the IMQ device where it's enqueued/dequeued to the
++	  attached qdisc. This allows you to treat network devices as classes
++	  and distribute bandwidth among them. Iptables is used to specify
++	  through which IMQ device, if any, packets travel.
++
++	  More information at: http://www.linuximq.net/
++
++	  To compile this driver as a module, choose M here: the module
++	  will be called imq.  If unsure, say N.
++
++choice
++	prompt "IMQ behavior (PRE/POSTROUTING)"
++	depends on IMQ
++	default IMQ_BEHAVIOR_BA
++	help
++
++		This setting defines how IMQ behaves with respect to its
++		hooking in PREROUTING and POSTROUTING.
++
++		IMQ can work in any of the following ways:
++
++		    PREROUTING   |      POSTROUTING
++		-----------------|-------------------
++		#1  After NAT    |      After NAT
++		#2  After NAT    |      Before NAT
++		#3  Before NAT   |      After NAT
++		#4  Before NAT   |      Before NAT
++
++		The default behavior is to hook before NAT on PREROUTING
++		and after NAT on POSTROUTING (#3).
++
++		These settings are especially useful when trying to use IMQ
++		to shape NATed clients.
++
++		More information can be found at: www.linuximq.net
++
++		If unsure, leave the default settings alone.
++
++config IMQ_BEHAVIOR_AA
++	bool "IMQ AA"
++	help
++		This setting defines how IMQ behaves with respect to its
++		hooking in PREROUTING and POSTROUTING.
++
++		Choosing this option will make IMQ hook like this:
++
++		PREROUTING:   After NAT
++		POSTROUTING:  After NAT
++
++		More information can be found at: www.linuximq.net
++
++		If unsure, leave the default settings alone.
++
++config IMQ_BEHAVIOR_AB
++	bool "IMQ AB"
++	help
++		This setting defines how IMQ behaves with respect to its
++		hooking in PREROUTING and POSTROUTING.
++
++		Choosing this option will make IMQ hook like this:
++
++		PREROUTING:   After NAT
++		POSTROUTING:  Before NAT
++
++		More information can be found at: www.linuximq.net
++
++		If unsure, leave the default settings alone.
++
++config IMQ_BEHAVIOR_BA
++	bool "IMQ BA"
++	help
++		This setting defines how IMQ behaves with respect to its
++		hooking in PREROUTING and POSTROUTING.
++
++		Choosing this option will make IMQ hook like this:
++
++		PREROUTING:   Before NAT
++		POSTROUTING:  After NAT
++
++		More information can be found at: www.linuximq.net
++
++		If unsure, leave the default settings alone.
++
++config IMQ_BEHAVIOR_BB
++	bool "IMQ BB"
++	help
++		This setting defines how IMQ behaves with respect to its
++		hooking in PREROUTING and POSTROUTING.
++
++		Choosing this option will make IMQ hook like this:
++
++		PREROUTING:   Before NAT
++		POSTROUTING:  Before NAT
++
++		More information can be found at: www.linuximq.net
++
++		If unsure, leave the default settings alone.
++
++endchoice
++
++config IMQ_NUM_DEVS
++
++	int "Number of IMQ devices"
++	range 2 8
++	depends on IMQ
++	default "2"
++	help
++
++		This setting defines how many IMQ devices will be created.
++
++		The default value is 2.
++
++		More information can be found at: www.linuximq.net
++
++		If unsure, leave the default settings alone.
++
+ config TUN
+ 	tristate "Universal TUN/TAP device driver support"
+ 	select CRC32
+diff -urN linux-2.6.21.1.old/drivers/net/Makefile linux-2.6.21.1.dev/drivers/net/Makefile
+--- linux-2.6.21.1.old/drivers/net/Makefile	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/drivers/net/Makefile	2007-05-26 20:34:15.194274856 +0200
+@@ -124,6 +124,7 @@
+ obj-$(CONFIG_SLHC) += slhc.o
+ 
+ obj-$(CONFIG_DUMMY) += dummy.o
++obj-$(CONFIG_IMQ) += imq.o
+ obj-$(CONFIG_IFB) += ifb.o
+ obj-$(CONFIG_DE600) += de600.o
+ obj-$(CONFIG_DE620) += de620.o
+diff -urN linux-2.6.21.1.old/include/linux/imq.h linux-2.6.21.1.dev/include/linux/imq.h
+--- linux-2.6.21.1.old/include/linux/imq.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/imq.h	2007-05-26 20:34:15.458234728 +0200
+@@ -0,0 +1,9 @@
++#ifndef _IMQ_H
++#define _IMQ_H
++
++#define IMQ_MAX_DEVS   16
++
++#define IMQ_F_IFMASK   0x7f
++#define IMQ_F_ENQUEUE  0x80
++
++#endif /* _IMQ_H */
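skb->imq_flags packs the target device index into the low seven bits (IMQ_F_IFMASK) and uses bit 7 (IMQ_F_ENQUEUE) to request queueing; that is how the ipt_IMQ/ip6t_IMQ targets further down hand the chosen device to the imq driver. A minimal sketch of the encoding, outside any kernel context:

#include <stdio.h>

#define IMQ_F_IFMASK   0x7f
#define IMQ_F_ENQUEUE  0x80

int main(void)
{
	/* What the IMQ target does: remember device 3 and ask for enqueueing. */
	unsigned char imq_flags = 3 | IMQ_F_ENQUEUE;

	/* What the imq driver checks on the other side before queueing. */
	if (imq_flags & IMQ_F_ENQUEUE)
		printf("enqueue to imq%u\n", imq_flags & IMQ_F_IFMASK);
	return 0;
}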
+diff -urN linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_IMQ.h linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_IMQ.h
+--- linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_IMQ.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_IMQ.h	2007-05-26 20:34:15.458234728 +0200
+@@ -0,0 +1,8 @@
++#ifndef _IPT_IMQ_H
++#define _IPT_IMQ_H
++
++struct ipt_imq_info {
++	unsigned int todev;     /* target imq device */
++};
++
++#endif /* _IPT_IMQ_H */
+diff -urN linux-2.6.21.1.old/include/linux/netfilter_ipv6/ip6t_IMQ.h linux-2.6.21.1.dev/include/linux/netfilter_ipv6/ip6t_IMQ.h
+--- linux-2.6.21.1.old/include/linux/netfilter_ipv6/ip6t_IMQ.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/netfilter_ipv6/ip6t_IMQ.h	2007-05-26 20:34:15.495229104 +0200
+@@ -0,0 +1,8 @@
++#ifndef _IP6T_IMQ_H
++#define _IP6T_IMQ_H
++
++struct ip6t_imq_info {
++	unsigned int todev;     /* target imq device */
++};
++
++#endif /* _IP6T_IMQ_H */
+diff -urN linux-2.6.21.1.old/include/linux/skbuff.h linux-2.6.21.1.dev/include/linux/skbuff.h
+--- linux-2.6.21.1.old/include/linux/skbuff.h	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/include/linux/skbuff.h	2007-05-26 20:34:15.496228952 +0200
+@@ -285,6 +285,10 @@
+ 	struct nf_conntrack	*nfct;
+ 	struct sk_buff		*nfct_reasm;
+ #endif
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++	unsigned char		imq_flags;
++	struct nf_info		*nf_info;
++#endif
+ #ifdef CONFIG_BRIDGE_NETFILTER
+ 	struct nf_bridge_info	*nf_bridge;
+ #endif
+diff -urN linux-2.6.21.1.old/net/core/dev.c linux-2.6.21.1.dev/net/core/dev.c
+--- linux-2.6.21.1.old/net/core/dev.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/core/dev.c	2007-05-26 20:34:15.497228800 +0200
+@@ -94,6 +94,9 @@
+ #include <linux/skbuff.h>
+ #include <net/sock.h>
+ #include <linux/rtnetlink.h>
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++#include <linux/imq.h>
++#endif
+ #include <linux/proc_fs.h>
+ #include <linux/seq_file.h>
+ #include <linux/stat.h>
+@@ -1403,7 +1406,11 @@
+ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	if (likely(!skb->next)) {
+-		if (!list_empty(&ptype_all))
++		if (!list_empty(&ptype_all)
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++		    && !(skb->imq_flags & IMQ_F_ENQUEUE)
++#endif
++		    )
+ 			dev_queue_xmit_nit(skb, dev);
+ 
+ 		if (netif_needs_gso(dev, skb)) {
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/ipt_IMQ.c linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_IMQ.c
+--- linux-2.6.21.1.old/net/ipv4/netfilter/ipt_IMQ.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_IMQ.c	2007-05-26 20:34:15.498228648 +0200
+@@ -0,0 +1,71 @@
++/*
++ * This target marks packets to be enqueued to an imq device
++ */
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_IMQ.h>
++#include <linux/imq.h>
++
++static unsigned int imq_target(struct sk_buff **pskb,
++			       const struct net_device *in,
++			       const struct net_device *out,
++			       unsigned int hooknum,
++			       const struct xt_target *target,
++			       const void *targinfo)
++{
++	struct ipt_imq_info *mr = (struct ipt_imq_info*)targinfo;
++
++	(*pskb)->imq_flags = mr->todev | IMQ_F_ENQUEUE;
++
++	return IPT_CONTINUE;
++}
++
++static int imq_checkentry(const char *tablename,
++			  const void *e,
++			  const struct xt_target *target,
++			  void *targinfo,
++			  unsigned int hook_mask)
++{
++	struct ipt_imq_info *mr;
++
++	mr = (struct ipt_imq_info*)targinfo;
++
++	if (mr->todev > IMQ_MAX_DEVS) {
++		printk(KERN_WARNING
++		       "IMQ: invalid device specified, highest is %u\n",
++		       IMQ_MAX_DEVS);
++		return 0;
++	}
++
++	return 1;
++}
++
++static struct ipt_target ipt_imq_reg = {
++	.name		= "IMQ",
++	.target		= imq_target,
++	.targetsize	= sizeof(struct ipt_imq_info),
++	.checkentry	= imq_checkentry,
++	.me		= THIS_MODULE,
++	.table		= "mangle"
++};
++
++static int __init init(void)
++{
++	if (xt_register_target(&ipt_imq_reg))
++		return -EINVAL;
++
++	return 0;
++}
++
++static void __exit fini(void)
++{
++	xt_unregister_target(&ipt_imq_reg);
++}
++
++module_init(init);
++module_exit(fini);
++
++MODULE_AUTHOR("http://www.linuximq.net");
++MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
++MODULE_LICENSE("GPL");
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/Kconfig linux-2.6.21.1.dev/net/ipv4/netfilter/Kconfig
+--- linux-2.6.21.1.old/net/ipv4/netfilter/Kconfig	2007-05-26 20:34:13.929467136 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/Kconfig	2007-05-26 20:34:15.498228648 +0200
+@@ -351,6 +351,17 @@
+ 
+ 	  To compile it as a module, choose M here.  If unsure, say N.
+ 
++config IP_NF_TARGET_IMQ
++       tristate "IMQ target support"
++       depends on IP_NF_MANGLE
++       help
++         This option adds an `IMQ' target which is used to specify if and
++         to which IMQ device packets should get enqueued/dequeued.
++
++         For more information visit: http://www.linuximq.net/
++
++         To compile it as a module, choose M here.  If unsure, say N.
++
+ config IP_NF_TARGET_TOS
+ 	tristate "TOS target support"
+ 	depends on IP_NF_MANGLE
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/Makefile linux-2.6.21.1.dev/net/ipv4/netfilter/Makefile
+--- linux-2.6.21.1.old/net/ipv4/netfilter/Makefile	2007-05-26 20:34:13.930466984 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/Makefile	2007-05-26 20:34:15.499228496 +0200
+@@ -58,6 +58,7 @@
+ obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
+ obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o
+ obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
++obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o
+ obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
+ obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
+ obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
+diff -urN linux-2.6.21.1.old/net/ipv6/netfilter/ip6t_IMQ.c linux-2.6.21.1.dev/net/ipv6/netfilter/ip6t_IMQ.c
+--- linux-2.6.21.1.old/net/ipv6/netfilter/ip6t_IMQ.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv6/netfilter/ip6t_IMQ.c	2007-05-26 20:34:15.531223632 +0200
+@@ -0,0 +1,71 @@
++/*
++ * This target marks packets to be enqueued to an imq device
++ */
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter_ipv6/ip6_tables.h>
++#include <linux/netfilter_ipv6/ip6t_IMQ.h>
++#include <linux/imq.h>
++
++static unsigned int imq_target(struct sk_buff **pskb,
++			       const struct net_device *in,
++			       const struct net_device *out,
++			       unsigned int hooknum,
++			       const struct xt_target *target,
++			       const void *targinfo)
++{
++	struct ip6t_imq_info *mr = (struct ip6t_imq_info*)targinfo;
++
++	(*pskb)->imq_flags = mr->todev | IMQ_F_ENQUEUE;
++
++	return IP6T_CONTINUE;
++}
++
++static int imq_checkentry(const char *tablename,
++			  const void *entry,
++			  const struct xt_target *target,
++			  void *targinfo,
++			  unsigned int hook_mask)
++{
++	struct ip6t_imq_info *mr;
++
++	mr = (struct ip6t_imq_info*)targinfo;
++
++	if (mr->todev > IMQ_MAX_DEVS) {
++		printk(KERN_WARNING
++		       "IMQ: invalid device specified, highest is %u\n",
++		       IMQ_MAX_DEVS);
++		return 0;
++	}
++
++	return 1;
++}
++
++static struct ip6t_target ip6t_imq_reg = {
++	.name           = "IMQ",
++	.target         = imq_target,
++	.targetsize	= sizeof(struct ip6t_imq_info),
++	.table		= "mangle",
++	.checkentry     = imq_checkentry,
++	.me             = THIS_MODULE
++};
++
++static int __init init(void)
++{
++	if (xt_register_target(&ip6t_imq_reg))
++		return -EINVAL;
++
++	return 0;
++}
++
++static void __exit fini(void)
++{
++	xt_unregister_target(&ip6t_imq_reg);
++}
++
++module_init(init);
++module_exit(fini);
++
++MODULE_AUTHOR("http://www.linuximq.net");
++MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
++MODULE_LICENSE("GPL");
+diff -urN linux-2.6.21.1.old/net/ipv6/netfilter/Kconfig linux-2.6.21.1.dev/net/ipv6/netfilter/Kconfig
+--- linux-2.6.21.1.old/net/ipv6/netfilter/Kconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/ipv6/netfilter/Kconfig	2007-05-26 20:34:15.531223632 +0200
+@@ -173,6 +173,15 @@
+ 
+ 	  To compile it as a module, choose M here.  If unsure, say N.
+ 
++config IP6_NF_TARGET_IMQ
++	tristate "IMQ target support"
++	depends on IP6_NF_MANGLE
++	help
++          This option adds an `IMQ' target which is used to specify if and
++          to which IMQ device packets should get enqueued/dequeued.
++
++          To compile it as a module, choose M here.  If unsure, say N.
++
+ config IP6_NF_TARGET_HL
+ 	tristate  'HL (hoplimit) target support'
+ 	depends on IP6_NF_MANGLE
+diff -urN linux-2.6.21.1.old/net/ipv6/netfilter/Makefile linux-2.6.21.1.dev/net/ipv6/netfilter/Makefile
+--- linux-2.6.21.1.old/net/ipv6/netfilter/Makefile	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/ipv6/netfilter/Makefile	2007-05-26 20:34:15.558219528 +0200
+@@ -13,6 +13,7 @@
+ obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o
+ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
+ obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
++obj-$(CONFIG_IP6_NF_TARGET_IMQ) += ip6t_IMQ.o
+ obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o
+ obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
+ obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
+diff -urN linux-2.6.21.1.old/net/sched/sch_generic.c linux-2.6.21.1.dev/net/sched/sch_generic.c
+--- linux-2.6.21.1.old/net/sched/sch_generic.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/sched/sch_generic.c	2007-05-26 20:34:15.599213296 +0200
+@@ -77,7 +77,6 @@
+ 
+    NOTE: Called under dev->queue_lock with locally disabled BH.
+ */
+-
+ static inline int qdisc_restart(struct net_device *dev)
+ {
+ 	struct Qdisc *q = dev->qdisc;
+@@ -177,6 +176,11 @@
+ 	return q->q.qlen;
+ }
+ 
++int qdisc_restart1(struct net_device *dev)
++{
++	return qdisc_restart(dev);
++}
++
+ void __qdisc_run(struct net_device *dev)
+ {
+ 	do {
+@@ -607,3 +611,4 @@
+ EXPORT_SYMBOL(qdisc_reset);
+ EXPORT_SYMBOL(qdisc_lock_tree);
+ EXPORT_SYMBOL(qdisc_unlock_tree);
++EXPORT_SYMBOL(qdisc_restart1);
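
The IMQ hunks above follow the same module skeleton as the other out-of-tree netfilter targets added by this commit (ROUTE below, CHAOS and DELUDE in the chaostables patch): fill in a target descriptor, register it with xtables when the module loads, and unregister it on unload. A minimal sketch of that skeleton against 2.6.21/2.6.22 -- the "example"/"EXAMPLE" names are placeholders and not part of any patch:

	/* Illustrative skeleton only; "example"/"EXAMPLE" are placeholder names. */
	#include <linux/module.h>
	#include <linux/socket.h>
	#include <linux/skbuff.h>
	#include <linux/netfilter_ipv4/ip_tables.h>

	static unsigned int example_target(struct sk_buff **pskb,
					   const struct net_device *in,
					   const struct net_device *out,
					   unsigned int hooknum,
					   const struct xt_target *target,
					   const void *targinfo)
	{
		/* inspect or mark (*pskb) here, then let the chain continue */
		return IPT_CONTINUE;
	}

	static struct ipt_target example_reg = {
		.name		= "EXAMPLE",
		.target		= example_target,
		.targetsize	= 0,		/* no per-rule data in this sketch */
		.table		= "mangle",
		.family		= AF_INET,	/* as xt_CHAOS.c does below */
		.me		= THIS_MODULE,
	};

	static int __init example_init(void)
	{
		return xt_register_target(&example_reg) ? -EINVAL : 0;
	}

	static void __exit example_exit(void)
	{
		xt_unregister_target(&example_reg);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");

The per-rule data (.targetsize, 0 in this sketch) is what the corresponding iptables userspace extension copies into the kernel: IMQ uses struct ipt_imq_info for that, ROUTE uses struct ipt_route_target_info.
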
diff --git a/target/linux/generic-2.6/patches-2.6.22/160-netfilter_route.patch b/target/linux/generic-2.6/patches-2.6.22/160-netfilter_route.patch
new file mode 100644
index 0000000000..0e9984bbbb
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/160-netfilter_route.patch
@@ -0,0 +1,957 @@
+diff -urN linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_ROUTE.h linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_ROUTE.h
+--- linux-2.6.21.1.old/include/linux/netfilter_ipv4/ipt_ROUTE.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/netfilter_ipv4/ipt_ROUTE.h	2007-05-26 20:37:08.971856648 +0200
+@@ -0,0 +1,23 @@
++/* Header file for iptables ipt_ROUTE target
++ *
++ * (C) 2002 by Cédric de Launois <delaunois@info.ucl.ac.be>
++ *
++ * This software is distributed under GNU GPL v2, 1991
++ */
++#ifndef _IPT_ROUTE_H_target
++#define _IPT_ROUTE_H_target
++
++#define IPT_ROUTE_IFNAMSIZ 16
++
++struct ipt_route_target_info {
++	char      oif[IPT_ROUTE_IFNAMSIZ];      /* Output Interface Name */
++	char      iif[IPT_ROUTE_IFNAMSIZ];      /* Input Interface Name  */
++	u_int32_t gw;                           /* IP address of gateway */
++	u_int8_t  flags;
++};
++
++/* Values for "flags" field */
++#define IPT_ROUTE_CONTINUE        0x01
++#define IPT_ROUTE_TEE             0x02
++
++#endif /*_IPT_ROUTE_H_target*/
+diff -urN linux-2.6.21.1.old/include/linux/netfilter_ipv6/ip6t_ROUTE.h linux-2.6.21.1.dev/include/linux/netfilter_ipv6/ip6t_ROUTE.h
+--- linux-2.6.21.1.old/include/linux/netfilter_ipv6/ip6t_ROUTE.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/netfilter_ipv6/ip6t_ROUTE.h	2007-05-26 20:37:09.001852088 +0200
+@@ -0,0 +1,23 @@
++/* Header file for iptables ip6t_ROUTE target
++ *
++ * (C) 2003 by Cédric de Launois <delaunois@info.ucl.ac.be>
++ *
++ * This software is distributed under GNU GPL v2, 1991
++ */
++#ifndef _IPT_ROUTE_H_target
++#define _IPT_ROUTE_H_target
++
++#define IP6T_ROUTE_IFNAMSIZ 16
++
++struct ip6t_route_target_info {
++	char      oif[IP6T_ROUTE_IFNAMSIZ];     /* Output Interface Name */
++	char      iif[IP6T_ROUTE_IFNAMSIZ];     /* Input Interface Name  */
++	u_int32_t gw[4];                        /* IPv6 address of gateway */
++	u_int8_t  flags;
++};
++
++/* Values for "flags" field */
++#define IP6T_ROUTE_CONTINUE        0x01
++#define IP6T_ROUTE_TEE             0x02
++
++#endif /*_IP6T_ROUTE_H_target*/
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/ipt_ROUTE.c linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_ROUTE.c
+--- linux-2.6.21.1.old/net/ipv4/netfilter/ipt_ROUTE.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/ipt_ROUTE.c	2007-05-26 20:37:09.001852088 +0200
+@@ -0,0 +1,483 @@
++/*
++ * This implements the ROUTE target, which enables you to setup unusual
++ * routes not supported by the standard kernel routing table.
++ *
++ * Copyright (C) 2002 Cedric de Launois <delaunois@info.ucl.ac.be>
++ *
++ * v 1.11 2004/11/23
++ *
++ * This software is distributed under GNU GPL v2, 1991
++ */
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <net/netfilter/nf_conntrack.h>
++#include <linux/netfilter_ipv4/ipt_ROUTE.h>
++#include <linux/netdevice.h>
++#include <linux/route.h>
++#include <linux/version.h>
++#include <linux/if_arp.h>
++#include <net/ip.h>
++#include <net/route.h>
++#include <net/icmp.h>
++#include <net/checksum.h>
++
++#if 0
++#define DEBUGP printk
++#else
++#define DEBUGP(format, args...)
++#endif
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Cedric de Launois <delaunois@info.ucl.ac.be>");
++MODULE_DESCRIPTION("iptables ROUTE target module");
++
++/* Try to route the packet according to the routing keys specified in
++ * route_info. Keys are :
++ *  - ifindex : 
++ *      0 if no oif preferred, 
++ *      otherwise set to the index of the desired oif
++ *  - route_info->gw :
++ *      0 if no gateway specified,
++ *      otherwise set to the next host to which the pkt must be routed
++ * On success, skb->dev is the output device to which the packet must
++ * be sent and skb->dst is not NULL.
++ *
++ * RETURN: -1 if an error occurred
++ *          1 if the packet was successfully routed to the
++ *            destination desired
++ *          0 if the kernel routing table could not route the packet
++ *            according to the keys specified
++ */
++static int route(struct sk_buff *skb,
++		 unsigned int ifindex,
++		 const struct ipt_route_target_info *route_info)
++{
++	int err;
++	struct rtable *rt;
++	struct iphdr *iph = ip_hdr(skb);
++	struct flowi fl = {
++		.oif = ifindex,
++		.nl_u = {
++			.ip4_u = {
++				.daddr = iph->daddr,
++				.saddr = 0,
++				.tos = RT_TOS(iph->tos),
++				.scope = RT_SCOPE_UNIVERSE,
++			}
++		} 
++	};
++	
++	/* The destination address may be overloaded by the target */
++	if (route_info->gw)
++		fl.fl4_dst = route_info->gw;
++	
++	/* Trying to route the packet using the standard routing table. */
++	if ((err = ip_route_output_key(&rt, &fl))) {
++		if (net_ratelimit()) 
++			DEBUGP("ipt_ROUTE: couldn't route pkt (err: %i)",err);
++		return -1;
++	}
++	
++	/* Drop old route. */
++	dst_release(skb->dst);
++	skb->dst = NULL;
++
++	/* Success if no oif specified or if the oif corresponds to the
++	 * one desired */
++	if (!ifindex || rt->u.dst.dev->ifindex == ifindex) {
++		skb->dst = &rt->u.dst;
++		skb->dev = skb->dst->dev;
++		skb->protocol = htons(ETH_P_IP);
++		return 1;
++	}
++	
++	/* The interface selected by the routing table is not the one
++	 * specified by the user. This may happen because the dst address
++	 * is one of our own addresses.
++	 */
++	if (net_ratelimit()) 
++		DEBUGP("ipt_ROUTE: failed to route as desired gw=%u.%u.%u.%u oif=%i (got oif=%i)\n", 
++		       NIPQUAD(route_info->gw), ifindex, rt->u.dst.dev->ifindex);
++	
++	return 0;
++}
++
++
++/* Stolen from ip_finish_output2
++ * PRE : skb->dev is set to the device we are leaving by
++ *       skb->dst is not NULL
++ * POST: the packet is sent with the link layer header pushed
++ *       the packet is destroyed
++ */
++static void ip_direct_send(struct sk_buff *skb)
++{
++	struct dst_entry *dst = skb->dst;
++	struct hh_cache *hh = dst->hh;
++	struct net_device *dev = dst->dev;
++	int hh_len = LL_RESERVED_SPACE(dev);
++
++	/* Be paranoid, rather than too clever. */
++	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
++		struct sk_buff *skb2;
++
++		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
++		if (skb2 == NULL) {
++			kfree_skb(skb);
++			return;
++		}
++		if (skb->sk)
++			skb_set_owner_w(skb2, skb->sk);
++		kfree_skb(skb);
++		skb = skb2;
++	}
++
++	if (hh) {
++		int hh_alen;
++
++		read_lock_bh(&hh->hh_lock);
++		hh_alen = HH_DATA_ALIGN(hh->hh_len);
++  		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
++		read_unlock_bh(&hh->hh_lock);
++		skb_push(skb, hh->hh_len);
++		hh->hh_output(skb);
++	} else if (dst->neighbour)
++		dst->neighbour->output(skb);
++	else {
++		if (net_ratelimit())
++			DEBUGP(KERN_DEBUG "ipt_ROUTE: no hdr & no neighbour cache!\n");
++		kfree_skb(skb);
++	}
++}
++
++
++/* PRE : skb->dev is set to the device we are leaving by
++ * POST: - the packet is directly sent to the skb->dev device, without 
++ *         pushing the link layer header.
++ *       - the packet is destroyed
++ */
++static inline int dev_direct_send(struct sk_buff *skb)
++{
++	return dev_queue_xmit(skb);
++}
++
++
++static unsigned int route_oif(const struct ipt_route_target_info *route_info,
++			      struct sk_buff *skb) 
++{
++	unsigned int ifindex = 0;
++	struct net_device *dev_out = NULL;
++
++	/* The user set the interface name to use.
++	 * Getting the current interface index.
++	 */
++	if ((dev_out = dev_get_by_name(route_info->oif))) {
++		ifindex = dev_out->ifindex;
++	} else {
++		/* Unknown interface name : packet dropped */
++		if (net_ratelimit()) 
++			DEBUGP("ipt_ROUTE: oif interface %s not found\n", route_info->oif);
++		return NF_DROP;
++	}
++
++	/* Trying the standard way of routing packets */
++	switch (route(skb, ifindex, route_info)) {
++	case 1:
++		dev_put(dev_out);
++		if (route_info->flags & IPT_ROUTE_CONTINUE)
++			return IPT_CONTINUE;
++
++		ip_direct_send(skb);
++		return NF_STOLEN;
++
++	case 0:
++		/* Failed to send to oif. Trying the hard way */
++		if (route_info->flags & IPT_ROUTE_CONTINUE)
++			return NF_DROP;
++
++		if (net_ratelimit()) 
++			DEBUGP("ipt_ROUTE: forcing the use of %i\n",
++			       ifindex);
++
++		/* We have to force the use of an interface.
++		 * This interface must be a tunnel interface since
++		 * otherwise we can't guess the hw address for
++		 * the packet. For a tunnel interface, no hw address
++		 * is needed.
++		 */
++		if ((dev_out->type != ARPHRD_TUNNEL)
++		    && (dev_out->type != ARPHRD_IPGRE)) {
++			if (net_ratelimit()) 
++				DEBUGP("ipt_ROUTE: can't guess the hw addr !\n");
++			dev_put(dev_out);
++			return NF_DROP;
++		}
++	
++		/* Send the packet. This will also free skb
++		 * Do not go through the POST_ROUTING hook because 
++		 * skb->dst is not set and because it will probably
++		 * get confused by the destination IP address.
++		 */
++		skb->dev = dev_out;
++		dev_direct_send(skb);
++		dev_put(dev_out);
++		return NF_STOLEN;
++		
++	default:
++		/* Unexpected error */
++		dev_put(dev_out);
++		return NF_DROP;
++	}
++}
++
++
++static unsigned int route_iif(const struct ipt_route_target_info *route_info,
++			      struct sk_buff *skb) 
++{
++	struct net_device *dev_in = NULL;
++
++	/* Getting the current interface index. */
++	if (!(dev_in = dev_get_by_name(route_info->iif))) {
++		if (net_ratelimit()) 
++			DEBUGP("ipt_ROUTE: iif interface %s not found\n", route_info->iif);
++		return NF_DROP;
++	}
++
++	skb->dev = dev_in;
++	dst_release(skb->dst);
++	skb->dst = NULL;
++
++	netif_rx(skb);
++	dev_put(dev_in);
++	return NF_STOLEN;
++}
++
++
++static unsigned int route_gw(const struct ipt_route_target_info *route_info,
++			     struct sk_buff *skb) 
++{
++	if (route(skb, 0, route_info)!=1)
++		return NF_DROP;
++
++	if (route_info->flags & IPT_ROUTE_CONTINUE)
++		return IPT_CONTINUE;
++
++	ip_direct_send(skb);
++	return NF_STOLEN;
++}
++
++
++/* To detect and deter routed packet loopback when using the --tee option,
++ * we take a page out of the raw.patch book: on the copied skb, we set up
++ * a fake ->nfct entry, pointing to the local &route_tee_track. We skip
++ * routing packets when we see they already have that ->nfct.
++ */
++
++static struct nf_conn route_tee_track;
++
++static unsigned int ipt_route_target(struct sk_buff **pskb,
++				     const struct net_device *in,
++				     const struct net_device *out,
++				     unsigned int hooknum,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++				     const struct xt_target *target,
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++				     const void *targinfo,
++				     void *userinfo)
++#else
++				     const void *targinfo)
++#endif
++{
++	const struct ipt_route_target_info *route_info = targinfo;
++	struct sk_buff *skb = *pskb;
++	unsigned int res;
++
++	if (skb->nfct == &route_tee_track.ct_general) {
++		/* Loopback - a packet we already routed, is to be
++		 * routed another time. Avoid that, now.
++		 */
++		if (net_ratelimit()) 
++			DEBUGP(KERN_DEBUG "ipt_ROUTE: loopback - DROP!\n");
++		return NF_DROP;
++	}
++
++	/* If we are at PREROUTING or INPUT hook
++	 * the TTL isn't decreased by the IP stack
++	 */
++	if (hooknum == NF_IP_PRE_ROUTING ||
++	    hooknum == NF_IP_LOCAL_IN) {
++
++		struct iphdr *iph = ip_hdr(skb);
++
++		if (iph->ttl <= 1) {
++			struct rtable *rt;
++			struct flowi fl = {
++				.oif = 0,
++				.nl_u = {
++					.ip4_u = {
++						.daddr = iph->daddr,
++						.saddr = iph->saddr,
++						.tos = RT_TOS(iph->tos),
++						.scope = ((iph->tos & RTO_ONLINK) ?
++							  RT_SCOPE_LINK :
++							  RT_SCOPE_UNIVERSE)
++					}
++				} 
++			};
++
++			if (ip_route_output_key(&rt, &fl)) {
++				return NF_DROP;
++			}
++
++			if (skb->dev == rt->u.dst.dev) {
++				/* Drop old route. */
++				dst_release(skb->dst);
++				skb->dst = &rt->u.dst;
++
++				/* this will traverse normal stack, and 
++				 * thus call conntrack on the icmp packet */
++				icmp_send(skb, ICMP_TIME_EXCEEDED, 
++					  ICMP_EXC_TTL, 0);
++			}
++
++			return NF_DROP;
++		}
++
++		/*
++		 * If we are at INPUT the checksum must be recalculated since
++		 * the length could change as the result of a defragmentation.
++		 */
++		if(hooknum == NF_IP_LOCAL_IN) {
++			iph->ttl = iph->ttl - 1;
++			iph->check = 0;
++			iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
++		} else {
++			ip_decrease_ttl(iph);
++		}
++	}
++
++	if ((route_info->flags & IPT_ROUTE_TEE)) {
++		/*
++		 * Copy the *pskb, and route the copy. Will later return
++		 * IPT_CONTINUE for the original skb, which should continue
++		 * on its way as if nothing happened. The copy should be
++		 * independently delivered to the ROUTE --gw.
++		 */
++		skb = skb_copy(*pskb, GFP_ATOMIC);
++		if (!skb) {
++			if (net_ratelimit()) 
++				DEBUGP(KERN_DEBUG "ipt_ROUTE: copy failed!\n");
++			return IPT_CONTINUE;
++		}
++	}
++
++	/* Tell conntrack to forget this packet since it may get confused 
++	 * when a packet is leaving with dst address == our address.
++	 * Good idea ? Dunno. Need advice.
++	 *
++	 * NEW: mark the skb with our &route_tee_track, so we avoid looping
++	 * on any already routed packet.
++	 */
++	if (!(route_info->flags & IPT_ROUTE_CONTINUE)) {
++		nf_conntrack_put(skb->nfct);
++		skb->nfct = &route_tee_track.ct_general;
++		skb->nfctinfo = IP_CT_NEW;
++		nf_conntrack_get(skb->nfct);
++	}
++
++	if (route_info->oif[0] != '\0') {
++		res = route_oif(route_info, skb);
++	} else if (route_info->iif[0] != '\0') {
++		res = route_iif(route_info, skb);
++	} else if (route_info->gw) {
++		res = route_gw(route_info, skb);
++	} else {
++		if (net_ratelimit()) 
++			DEBUGP(KERN_DEBUG "ipt_ROUTE: no parameter !\n");
++		res = IPT_CONTINUE;
++	}
++
++	if ((route_info->flags & IPT_ROUTE_TEE))
++		res = IPT_CONTINUE;
++
++	return res;
++}
++
++
++static int ipt_route_checkentry(const char *tablename,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++				const void *e,
++#else
++				const struct ipt_ip *ip,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++				const struct xt_target *target,
++#endif
++				void *targinfo,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++				unsigned int targinfosize,
++#endif
++				unsigned int hook_mask)
++{
++	if (strcmp(tablename, "mangle") != 0) {
++		printk("ipt_ROUTE: bad table `%s', use the `mangle' table.\n",
++		       tablename);
++		return 0;
++	}
++
++	if (hook_mask & ~(  (1 << NF_IP_PRE_ROUTING)
++			    | (1 << NF_IP_LOCAL_IN)
++			    | (1 << NF_IP_FORWARD)
++			    | (1 << NF_IP_LOCAL_OUT)
++			    | (1 << NF_IP_POST_ROUTING))) {
++		printk("ipt_ROUTE: bad hook\n");
++		return 0;
++	}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++	if (targinfosize != IPT_ALIGN(sizeof(struct ipt_route_target_info))) {
++		printk(KERN_WARNING "ipt_ROUTE: targinfosize %u != %Zu\n",
++		       targinfosize,
++		       IPT_ALIGN(sizeof(struct ipt_route_target_info)));
++		return 0;
++	}
++#endif
++
++	return 1;
++}
++
++
++static struct ipt_target ipt_route_reg = { 
++	.name = "ROUTE",
++	.target = ipt_route_target,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++	.targetsize = sizeof(struct ipt_route_target_info),
++#endif
++	.checkentry = ipt_route_checkentry,
++	.me = THIS_MODULE,
++};
++
++static int __init init(void)
++{
++	/* Set up fake conntrack (stolen from raw.patch):
++	    - to never be deleted, not in any hashes */
++	atomic_set(&route_tee_track.ct_general.use, 1);
++	/*  - and make it look like a confirmed connection */
++	set_bit(IPS_CONFIRMED_BIT, &route_tee_track.status);
++	/* Initialize fake conntrack so that NAT will skip it */
++	route_tee_track.status |= IPS_NAT_DONE_MASK;
++
++	return xt_register_target(&ipt_route_reg);
++}
++
++
++static void __exit fini(void)
++{
++	xt_unregister_target(&ipt_route_reg);
++}
++
++module_init(init);
++module_exit(fini);
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/Kconfig linux-2.6.21.1.dev/net/ipv4/netfilter/Kconfig
+--- linux-2.6.21.1.old/net/ipv4/netfilter/Kconfig	2007-05-26 20:37:08.734892672 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/Kconfig	2007-05-26 20:37:09.002851936 +0200
+@@ -562,5 +562,22 @@
+ 	  To compile it as a module, choose M here.  If unsure, say N.
+ 
+ 
++config IP_NF_TARGET_ROUTE
++	tristate  'ROUTE target support'
++	depends on IP_NF_MANGLE
++	help
++	  This option adds a `ROUTE' target, which enables you to set up unusual
++	  routes. For example, the ROUTE target lets you route a received packet
++	  through an interface or towards a host, even if the regular destination
++	  of the packet is the router itself. The ROUTE target is also able to
++	  change the incoming interface of a packet.
++
++	  The target may or may not be a final target. It has to be used inside
++	  the mangle table.
++
++	  If you want to compile it as a module, say M here and read
++	  <file:Documentation/kbuild/modules.txt>.  The module will be called
++	  ipt_ROUTE.  If unsure, say `N'.
++
+ endmenu
+ 
+diff -urN linux-2.6.21.1.old/net/ipv4/netfilter/Makefile linux-2.6.21.1.dev/net/ipv4/netfilter/Makefile
+--- linux-2.6.21.1.old/net/ipv4/netfilter/Makefile	2007-05-26 20:37:08.735892520 +0200
++++ linux-2.6.21.1.dev/net/ipv4/netfilter/Makefile	2007-05-26 20:37:09.002851936 +0200
+@@ -61,6 +61,7 @@
+ obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o
+ obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
+ obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
++obj-$(CONFIG_IP_NF_TARGET_ROUTE) += ipt_ROUTE.o
+ obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
+ obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o
+ obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
+diff -urN linux-2.6.21.1.old/net/ipv6/ndisc.c linux-2.6.21.1.dev/net/ipv6/ndisc.c
+--- linux-2.6.21.1.old/net/ipv6/ndisc.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv6/ndisc.c	2007-05-26 20:37:09.003851784 +0200
+@@ -154,6 +154,8 @@
+ 	.gc_thresh3 =	1024,
+ };
+ 
++EXPORT_SYMBOL(nd_tbl);
++
+ /* ND options */
+ struct ndisc_options {
+ 	struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX];
+diff -urN linux-2.6.21.1.old/net/ipv6/netfilter/ip6t_ROUTE.c linux-2.6.21.1.dev/net/ipv6/netfilter/ip6t_ROUTE.c
+--- linux-2.6.21.1.old/net/ipv6/netfilter/ip6t_ROUTE.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/ipv6/netfilter/ip6t_ROUTE.c	2007-05-26 20:37:09.003851784 +0200
+@@ -0,0 +1,330 @@
++/*
++ * This implements the ROUTE v6 target, which enables you to setup unusual
++ * routes not supported by the standard kernel routing table.
++ *
++ * Copyright (C) 2003 Cedric de Launois <delaunois@info.ucl.ac.be>
++ *
++ * v 1.1 2004/11/23
++ *
++ * This software is distributed under GNU GPL v2, 1991
++ */
++
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/ipv6.h>
++#include <linux/netfilter_ipv6/ip6_tables.h>
++#include <linux/netfilter_ipv6/ip6t_ROUTE.h>
++#include <linux/netdevice.h>
++#include <linux/version.h>
++#include <net/ipv6.h>
++#include <net/ndisc.h>
++#include <net/ip6_route.h>
++#include <linux/icmpv6.h>
++
++#if 1
++#define DEBUGP printk
++#else
++#define DEBUGP(format, args...)
++#endif
++
++#define NIP6(addr) \
++	ntohs((addr).s6_addr16[0]), \
++	ntohs((addr).s6_addr16[1]), \
++	ntohs((addr).s6_addr16[2]), \
++	ntohs((addr).s6_addr16[3]), \
++	ntohs((addr).s6_addr16[4]), \
++	ntohs((addr).s6_addr16[5]), \
++	ntohs((addr).s6_addr16[6]), \
++	ntohs((addr).s6_addr16[7])
++
++/* Route the packet according to the routing keys specified in
++ * route_info. Keys are :
++ *  - ifindex : 
++ *      0 if no oif preferred, 
++ *      otherwise set to the index of the desired oif
++ *  - route_info->gw :
++ *      0 if no gateway specified,
++ *      otherwise set to the next host to which the pkt must be routed
++ * On success, skb->dev is the output device to which the packet must
++ * be sent and skb->dst is not NULL.
++ *
++ * RETURN:  1 if the packet was successfully routed to the
++ *            destination desired
++ *          0 if the kernel routing table could not route the packet
++ *            according to the keys specified
++ */
++static int 
++route6(struct sk_buff *skb,
++       unsigned int ifindex,
++       const struct ip6t_route_target_info *route_info)
++{
++	struct rt6_info *rt = NULL;
++	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
++	struct in6_addr *gw = (struct in6_addr*)&route_info->gw;
++
++	DEBUGP("ip6t_ROUTE: called with: ");
++	DEBUGP("DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(ipv6h->daddr));
++	DEBUGP("GATEWAY=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(*gw));
++	DEBUGP("OUT=%s\n", route_info->oif);
++	
++	if (ipv6_addr_any(gw))
++		rt = rt6_lookup(&ipv6h->daddr, &ipv6h->saddr, ifindex, 1);
++	else
++		rt = rt6_lookup(gw, &ipv6h->saddr, ifindex, 1);
++
++	if (!rt)
++		goto no_route;
++
++	DEBUGP("ip6t_ROUTE: routing gives: ");
++	DEBUGP("DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(rt->rt6i_dst.addr));
++	DEBUGP("GATEWAY=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(rt->rt6i_gateway));
++	DEBUGP("OUT=%s\n", rt->rt6i_dev->name);
++
++	if (ifindex && rt->rt6i_dev->ifindex!=ifindex)
++		goto wrong_route;
++	
++	if (!rt->rt6i_nexthop) {
++		DEBUGP("ip6t_ROUTE: discovering neighbour\n");
++		rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_dst.addr);
++	}
++
++	/* Drop old route. */
++	dst_release(skb->dst);
++	skb->dst = &rt->u.dst;
++	skb->dev = rt->rt6i_dev;
++	return 1;
++
++ wrong_route:
++	dst_release(&rt->u.dst);
++ no_route:
++	if (!net_ratelimit())
++		return 0;
++
++	printk("ip6t_ROUTE: no explicit route found ");
++	if (ifindex)
++		printk("via interface %s ", route_info->oif);
++	if (!ipv6_addr_any(gw))
++		printk("via gateway %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", NIP6(*gw));
++	printk("\n");
++	return 0;
++}
++
++
++/* Stolen from ip6_output_finish
++ * PRE : skb->dev is set to the device we are leaving by
++ *       skb->dst is not NULL
++ * POST: the packet is sent with the link layer header pushed
++ *       the packet is destroyed
++ */
++static void ip_direct_send(struct sk_buff *skb)
++{
++	struct dst_entry *dst = skb->dst;
++	struct hh_cache *hh = dst->hh;
++
++	if (hh) {
++		read_lock_bh(&hh->hh_lock);
++		memcpy(skb->data - 16, hh->hh_data, 16);
++		read_unlock_bh(&hh->hh_lock);
++		skb_push(skb, hh->hh_len);
++		hh->hh_output(skb);
++	} else if (dst->neighbour)
++		dst->neighbour->output(skb);
++	else {
++		if (net_ratelimit())
++			DEBUGP(KERN_DEBUG "ip6t_ROUTE: no hdr & no neighbour cache!\n");
++		kfree_skb(skb);
++	}
++}
++
++
++static unsigned int 
++route6_oif(const struct ip6t_route_target_info *route_info,
++	   struct sk_buff *skb) 
++{
++	unsigned int ifindex = 0;
++	struct net_device *dev_out = NULL;
++
++	/* The user set the interface name to use.
++	 * Getting the current interface index.
++	 */
++	if ((dev_out = dev_get_by_name(route_info->oif))) {
++		ifindex = dev_out->ifindex;
++	} else {
++		/* Unknown interface name : packet dropped */
++		if (net_ratelimit()) 
++			DEBUGP("ip6t_ROUTE: oif interface %s not found\n", route_info->oif);
++
++		if (route_info->flags & IP6T_ROUTE_CONTINUE)
++			return IP6T_CONTINUE;
++		else
++			return NF_DROP;
++	}
++
++	/* Trying the standard way of routing packets */
++	if (route6(skb, ifindex, route_info)) {
++		dev_put(dev_out);
++		if (route_info->flags & IP6T_ROUTE_CONTINUE)
++			return IP6T_CONTINUE;
++		
++		ip_direct_send(skb);
++		return NF_STOLEN;
++	} else 
++		return NF_DROP;
++}
++
++
++static unsigned int 
++route6_gw(const struct ip6t_route_target_info *route_info,
++	  struct sk_buff *skb) 
++{
++	if (route6(skb, 0, route_info)) {
++		if (route_info->flags & IP6T_ROUTE_CONTINUE)
++			return IP6T_CONTINUE;
++
++		ip_direct_send(skb);
++		return NF_STOLEN;
++	} else
++		return NF_DROP;
++}
++
++
++static unsigned int 
++ip6t_route_target(struct sk_buff **pskb,
++		  const struct net_device *in,
++		  const struct net_device *out,
++		  unsigned int hooknum,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++		  const struct xt_target *target,
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++		  const void *targinfo,
++		  void *userinfo)
++#else
++		  const void *targinfo)
++#endif
++{
++	const struct ip6t_route_target_info *route_info = targinfo;
++	struct sk_buff *skb = *pskb;
++	struct in6_addr *gw = (struct in6_addr*)&route_info->gw;
++	unsigned int res;
++
++	if (route_info->flags & IP6T_ROUTE_CONTINUE)
++		goto do_it;
++
++	/* If we are at PREROUTING or INPUT hook
++	 * the TTL isn't decreased by the IP stack
++	 */
++	if (hooknum == NF_IP6_PRE_ROUTING ||
++	    hooknum == NF_IP6_LOCAL_IN) {
++
++		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
++
++		if (ipv6h->hop_limit <= 1) {
++			/* Force OUTPUT device used as source address */
++			skb->dev = skb->dst->dev;
++
++			icmpv6_send(skb, ICMPV6_TIME_EXCEED, 
++				    ICMPV6_EXC_HOPLIMIT, 0, skb->dev);
++
++			return NF_DROP;
++		}
++
++		ipv6h->hop_limit--;
++	}
++
++	if ((route_info->flags & IP6T_ROUTE_TEE)) {
++		/*
++		 * Copy the *pskb, and route the copy. Will later return
++		 * IP6T_CONTINUE for the original skb, which should continue
++		 * on its way as if nothing happened. The copy should be
++		 * independently delivered to the ROUTE --gw.
++		 */
++		skb = skb_copy(*pskb, GFP_ATOMIC);
++		if (!skb) {
++			if (net_ratelimit()) 
++				DEBUGP(KERN_DEBUG "ip6t_ROUTE: copy failed!\n");
++			return IP6T_CONTINUE;
++		}
++	}
++
++do_it:
++	if (route_info->oif[0]) {
++		res = route6_oif(route_info, skb);
++	} else if (!ipv6_addr_any(gw)) {
++		res = route6_gw(route_info, skb);
++	} else {
++		if (net_ratelimit()) 
++			DEBUGP(KERN_DEBUG "ip6t_ROUTE: no parameter !\n");
++		res = IP6T_CONTINUE;
++	}
++
++	if ((route_info->flags & IP6T_ROUTE_TEE))
++		res = IP6T_CONTINUE;
++
++	return res;
++}
++
++
++static int 
++ip6t_route_checkentry(const char *tablename,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		      const void *entry,
++#else
++		      const struct ip6t_entry *entry,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++		      const struct xt_target *target,
++#endif
++		      void *targinfo,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++		      unsigned int targinfosize,
++#endif
++		      unsigned int hook_mask)
++{
++	if (strcmp(tablename, "mangle") != 0) {
++		printk("ip6t_ROUTE: can only be called from \"mangle\" table.\n");
++		return 0;
++	}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++	if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_route_target_info))) {
++		printk(KERN_WARNING "ip6t_ROUTE: targinfosize %u != %Zu\n",
++		       targinfosize,
++		       IP6T_ALIGN(sizeof(struct ip6t_route_target_info)));
++		return 0;
++	}
++#endif
++
++	return 1;
++}
++
++
++static struct ip6t_target ip6t_route_reg = {
++	.name       = "ROUTE",
++	.target     = ip6t_route_target,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++	.targetsize = sizeof(struct ip6t_route_target_info),
++#endif
++	.checkentry = ip6t_route_checkentry,
++	.me         = THIS_MODULE
++};
++
++
++static int __init init(void)
++{
++	printk(KERN_DEBUG "registering ipv6 ROUTE target\n");
++	if (xt_register_target(&ip6t_route_reg))
++		return -EINVAL;
++
++	return 0;
++}
++
++
++static void __exit fini(void)
++{
++	xt_unregister_target(&ip6t_route_reg);
++}
++
++module_init(init);
++module_exit(fini);
++MODULE_LICENSE("GPL");
+diff -urN linux-2.6.21.1.old/net/ipv6/netfilter/Kconfig linux-2.6.21.1.dev/net/ipv6/netfilter/Kconfig
+--- linux-2.6.21.1.old/net/ipv6/netfilter/Kconfig	2007-05-26 20:37:08.809881272 +0200
++++ linux-2.6.21.1.dev/net/ipv6/netfilter/Kconfig	2007-05-26 20:37:09.003851784 +0200
+@@ -209,5 +209,18 @@
+ 	  If you want to compile it as a module, say M here and read
+ 	  <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
+ 
++config IP6_NF_TARGET_ROUTE
++	tristate 'ROUTE target support'
++	depends on IP6_NF_MANGLE
++	help
++	  This option adds a `ROUTE' target, which enables you to set up unusual
++	  routes. The ROUTE target is also able to change the incoming interface
++	  of a packet.
++
++	  The target may or may not be a final target. It has to be used inside
++	  the mangle table.
++
++	  This target does not work as a module.
++
+ endmenu
+ 
+diff -urN linux-2.6.21.1.old/net/ipv6/netfilter/Makefile linux-2.6.21.1.dev/net/ipv6/netfilter/Makefile
+--- linux-2.6.21.1.old/net/ipv6/netfilter/Makefile	2007-05-26 20:37:08.809881272 +0200
++++ linux-2.6.21.1.dev/net/ipv6/netfilter/Makefile	2007-05-26 20:37:09.003851784 +0200
+@@ -20,6 +20,7 @@
+ obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
+ obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
+ obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
++obj-$(CONFIG_IP6_NF_TARGET_ROUTE) += ip6t_ROUTE.o
+ obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
+ 
+ # objects for l3 independent conntrack
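
The ROUTE patch above covers only the kernel side; the matching iptables/ip6tables ROUTE extension (distributed separately, not part of this commit) fills in the struct ipt_route_target_info that ipt_route_target() reads. As a rough illustration, for an example rule such as "iptables -t mangle -A POSTROUTING -j ROUTE --gw 192.168.1.1 --tee", the per-rule data could look like this -- the gateway address is an arbitrary example, kept in network byte order because route() copies it straight into fl.fl4_dst:

	/* Illustration only -- not part of the patch above. */
	#include <asm/byteorder.h>
	#include <linux/netfilter_ipv4/ipt_ROUTE.h>

	static const struct ipt_route_target_info example_route_info = {
		.oif   = "",				/* no forced output interface */
		.iif   = "",				/* no forced input interface  */
		.gw    = __constant_htonl(0xc0a80101),	/* 192.168.1.1 */
		.flags = IPT_ROUTE_TEE,			/* route a copy, pass the original on */
	};

With IPT_ROUTE_TEE set, the target routes a copy of the packet towards the gateway and returns IPT_CONTINUE for the original, as the --tee branch of ipt_route_target() above shows.
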
diff --git a/target/linux/generic-2.6/patches-2.6.22/170-netfilter_chaostables.patch b/target/linux/generic-2.6/patches-2.6.22/170-netfilter_chaostables.patch
new file mode 100644
index 0000000000..2bc678e961
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/170-netfilter_chaostables.patch
@@ -0,0 +1,958 @@
+diff -urN linux-2.6.21.1.old/include/linux/netfilter/oot_conntrack.h linux-2.6.21.1.dev/include/linux/netfilter/oot_conntrack.h
+--- linux-2.6.21.1.old/include/linux/netfilter/oot_conntrack.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/netfilter/oot_conntrack.h	2007-05-26 20:40:10.922195992 +0200
+@@ -0,0 +1,5 @@
++#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
++#	include <linux/netfilter_ipv4/ip_conntrack.h>
++#else /* linux-2.6.20+ */
++#	include <net/netfilter/nf_nat_rule.h>
++#endif
+diff -urN linux-2.6.21.1.old/include/linux/netfilter/oot_trans.h linux-2.6.21.1.dev/include/linux/netfilter/oot_trans.h
+--- linux-2.6.21.1.old/include/linux/netfilter/oot_trans.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/netfilter/oot_trans.h	2007-05-26 20:40:10.940193256 +0200
+@@ -0,0 +1,14 @@
++/* Out of tree workarounds */
++#include <linux/version.h>
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
++#	define HAVE_MATCHINFOSIZE 1
++#	define HAVE_TARGUSERINFO 1
++#	define HAVE_TARGINFOSIZE 1
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
++#	define nfmark mark
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21)
++#	define tcp_v4_check(tcph, tcph_sz, s, d, csp) \
++		tcp_v4_check((tcph_sz), (s), (d), (csp))
++#endif
+diff -urN linux-2.6.21.1.old/include/linux/netfilter/xt_CHAOS.h linux-2.6.21.1.dev/include/linux/netfilter/xt_CHAOS.h
+--- linux-2.6.21.1.old/include/linux/netfilter/xt_CHAOS.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/netfilter/xt_CHAOS.h	2007-05-26 20:40:10.940193256 +0200
+@@ -0,0 +1,14 @@
++#ifndef _LINUX_XT_CHAOS_H
++#define _LINUX_XT_CHAOS_H 1
++
++enum xt_chaos_variant {
++	XTCHAOS_NORMAL,
++	XTCHAOS_TARPIT,
++	XTCHAOS_DELUDE,
++};
++
++struct xt_chaos_info {
++	enum xt_chaos_variant variant;
++};
++
++#endif /* _LINUX_XT_CHAOS_H */
+diff -urN linux-2.6.21.1.old/include/linux/netfilter/xt_portscan.h linux-2.6.21.1.dev/include/linux/netfilter/xt_portscan.h
+--- linux-2.6.21.1.old/include/linux/netfilter/xt_portscan.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/linux/netfilter/xt_portscan.h	2007-05-26 20:40:10.940193256 +0200
+@@ -0,0 +1,8 @@
++#ifndef _LINUX_XT_PORTSCAN_H
++#define _LINUX_XT_PORTSCAN_H 1
++
++struct xt_portscan_info {
++	unsigned int match_stealth, match_syn, match_cn, match_gr;
++};
++
++#endif /* _LINUX_XT_PORTSCAN_H */
+diff -urN linux-2.6.21.1.old/net/netfilter/find_match.c linux-2.6.21.1.dev/net/netfilter/find_match.c
+--- linux-2.6.21.1.old/net/netfilter/find_match.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/netfilter/find_match.c	2007-05-26 20:40:10.970188696 +0200
+@@ -0,0 +1,39 @@
++/*
++    xt_request_find_match
++    by Jan Engelhardt <jengelh [at] gmx de>, 2006 - 2007
++
++    Based upon linux-2.6.18.5/net/netfilter/x_tables.c:
++    Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
++    This program is free software; you can redistribute it and/or modify
++    it under the terms of the GNU General Public License version 2 as
++    published by the Free Software Foundation.
++*/
++#include <linux/err.h>
++#include <linux/netfilter_arp.h>
++#include <linux/socket.h>
++#include <linux/netfilter/x_tables.h>
++
++/*
++ * Yeah this code is sub-optimal, but the function is missing in
++ * mainline so far. -jengelh
++ */
++static struct xt_match *xt_request_find_match_lo(int af, const char *name,
++    u8 revision)
++{
++	static const char *const xt_prefix[] = {
++		[AF_INET]  = "ip",
++		[AF_INET6] = "ip6",
++		[NF_ARP]   = "arp",
++	};
++	struct xt_match *match;
++
++	match = try_then_request_module(xt_find_match(af, name, revision),
++		"%st_%s", xt_prefix[af], name);
++	if(IS_ERR(match) || match == NULL)
++		return NULL;
++
++	return match;
++}
++
++/* In case it goes into mainline, let this out-of-tree package compile */
++#define xt_request_find_match xt_request_find_match_lo
+diff -urN linux-2.6.21.1.old/net/netfilter/Kconfig linux-2.6.21.1.dev/net/netfilter/Kconfig
+--- linux-2.6.21.1.old/net/netfilter/Kconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/netfilter/Kconfig	2007-05-26 20:40:11.003183680 +0200
+@@ -255,6 +255,14 @@
+ 
+ # alphabetically ordered list of targets
+ 
++config NETFILTER_XT_TARGET_CHAOS
++	tristate '"CHAOS" target support'
++	depends on NETFILTER_XTABLES
++	help
++	  This option adds a `CHAOS' target.
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
+ config NETFILTER_XT_TARGET_CLASSIFY
+ 	tristate '"CLASSIFY" target support'
+ 	depends on NETFILTER_XTABLES
+@@ -282,6 +290,14 @@
+ 	  <file:Documentation/kbuild/modules.txt>.  The module will be called
+ 	  ipt_CONNMARK.ko.  If unsure, say `N'.
+ 
++config NETFILTER_XT_TARGET_DELUDE
++	tristate '"DELUDE" target support'
++	depends on NETFILTER_XTABLES
++	help
++	  This option adds a `DELUDE' target.
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
+ config NETFILTER_XT_TARGET_DSCP
+ 	tristate '"DSCP" target support'
+ 	depends on NETFILTER_XTABLES
+@@ -526,6 +542,14 @@
+ 
+ 	  To compile it as a module, choose M here.  If unsure, say N.
+ 
++config NETFILTER_XT_MATCH_PORTSCAN
++	tristate '"portscan" match support'
++	depends on NETFILTER_XTABLES
++	help
++	  This option adds 'portscan' match support.
++
++	  To compile it as a module, choose M here.  If unsure, say N.
++
+ config NETFILTER_XT_MATCH_MULTIPORT
+ 	tristate "Multiple port match support"
+ 	depends on NETFILTER_XTABLES
+diff -urN linux-2.6.21.1.old/net/netfilter/Makefile linux-2.6.21.1.dev/net/netfilter/Makefile
+--- linux-2.6.21.1.old/net/netfilter/Makefile	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/netfilter/Makefile	2007-05-26 20:40:11.003183680 +0200
+@@ -37,8 +37,10 @@
+ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
+ 
+ # targets
++obj-$(CONFIG_NETFILTER_XT_TARGET_CHAOS) += xt_CHAOS.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
++obj-$(CONFIG_NETFILTER_XT_TARGET_DELUDE) += xt_DELUDE.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
+@@ -63,6 +65,7 @@
+ obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
++obj-$(CONFIG_NETFILTER_XT_MATCH_PORTSCAN) += xt_portscan.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
+diff -urN linux-2.6.21.1.old/net/netfilter/xt_CHAOS.c linux-2.6.21.1.dev/net/netfilter/xt_CHAOS.c
+--- linux-2.6.21.1.old/net/netfilter/xt_CHAOS.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/netfilter/xt_CHAOS.c	2007-05-26 20:40:11.004183528 +0200
+@@ -0,0 +1,204 @@
++/*
++	CHAOS target for netfilter
++
++	Copyright © Jan Engelhardt <jengelh [at] gmx de>, 2006 - 2007
++	This program is free software; you can redistribute it and/or modify
++	it under the terms of the GNU General Public License version 2 as
++	published by the Free Software Foundation.
++*/
++#include <linux/icmp.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/stat.h>
++#include <linux/netfilter/x_tables.h>
++#include <linux/netfilter/xt_tcpudp.h>
++#include <linux/netfilter_ipv4/ipt_REJECT.h>
++#include <net/ip.h>
++#include <linux/netfilter/xt_CHAOS.h>
++#include "find_match.c"
++#include <linux/netfilter/oot_trans.h>
++#define PFX KBUILD_MODNAME ": "
++
++/* Module parameters */
++static unsigned int reject_percentage = ~0U * .01;
++static unsigned int delude_percentage = ~0U * .0101;
++module_param(reject_percentage, uint, S_IRUGO | S_IWUSR);
++module_param(delude_percentage, uint, S_IRUGO | S_IWUSR);
++
++/* References to other matches/targets */
++static struct xt_match *xm_tcp;
++static struct xt_target *xt_delude, *xt_reject, *xt_tarpit;
++
++static int have_delude, have_tarpit;
++
++/* Static data for other matches/targets */
++static const struct ipt_reject_info reject_params = {
++	.with = ICMP_HOST_UNREACH,
++};
++
++static const struct xt_tcp tcp_params = {
++	.spts = {0, ~0},
++	.dpts = {0, ~0},
++};
++
++/* CHAOS functions */
++static void xt_chaos_total(const struct xt_chaos_info *info,
++    struct sk_buff **pskb, const struct net_device *in,
++    const struct net_device *out, unsigned int hooknum)
++{
++	const int protoff = ip_hdrlen(*pskb);
++	const int offset  = ntohs(ip_hdr(*pskb)->frag_off) & IP_OFFSET;
++	const struct xt_target *destiny;
++	int hotdrop = 0, ret;
++
++	ret = xm_tcp->match(*pskb, in, out, xm_tcp, &tcp_params,
++	                    offset, protoff, &hotdrop);
++	if(!ret || hotdrop || (unsigned int)net_random() > delude_percentage)
++		return;
++
++	destiny = (info->variant == XTCHAOS_TARPIT) ? xt_tarpit : xt_delude;
++#ifdef HAVE_TARGUSERINFO
++	destiny->target(pskb, in, out, hooknum, destiny, NULL, NULL);
++#else
++	destiny->target(pskb, in, out, hooknum, destiny, NULL);
++#endif
++	return;
++}
++
++static unsigned int xt_chaos_target(struct sk_buff **pskb,
++    const struct net_device *in, const struct net_device *out,
++    unsigned int hooknum, const struct xt_target *target, const void *targinfo
++#ifdef HAVE_TARGUSERINFO
++    ,
++    void *userinfo
++#endif
++    )
++{
++	/* Equivalent to:
++	 * -A chaos -m statistic --mode random --probability \
++	 *         $reject_percentage -j REJECT --reject-with host-unreach;
++	 * -A chaos -p tcp -m statistic --mode random --probability \
++	 *         $delude_percentage -j DELUDE;
++	 * -A chaos -j DROP;
++	 */
++	const struct xt_chaos_info *info = targinfo;
++
++	if((unsigned int)net_random() <= reject_percentage)
++#ifdef HAVE_TARGUSERINFO
++		return xt_reject->target(pskb, in, out, hooknum, target,
++		       &reject_params, userinfo);
++#else
++		return xt_reject->target(pskb, in, out, hooknum, target,
++		       &reject_params);
++#endif
++
++	/* TARPIT/DELUDE may not be called from the OUTPUT chain */
++	if(ip_hdr(*pskb)->protocol == IPPROTO_TCP &&
++	  info->variant != XTCHAOS_NORMAL && hooknum != NF_IP_LOCAL_OUT)
++		xt_chaos_total(info, pskb, in, out, hooknum);
++
++	return NF_DROP;
++}
++
++static int xt_chaos_checkentry(const char *tablename, const void *entry,
++    const struct xt_target *target, void *targinfo,
++#ifdef HAVE_TARGINFOSIZE
++    unsigned int targinfosize,
++#endif
++    unsigned int hook_mask)
++{
++	const struct xt_chaos_info *info = targinfo;
++	if(info->variant == XTCHAOS_DELUDE && !have_delude) {
++		printk(KERN_WARNING PFX "Error: Cannot use --delude when "
++		       "DELUDE module not available\n");
++		return 0;
++	}
++	if(info->variant == XTCHAOS_TARPIT && !have_tarpit) {
++		printk(KERN_WARNING PFX "Error: Cannot use --tarpit when "
++		       "TARPIT module not available\n");
++		return 0;
++	}
++	return 1;
++}
++
++static struct xt_target xt_chaos_info = {
++	.name       = "CHAOS",
++	.target     = xt_chaos_target,
++	.checkentry = xt_chaos_checkentry,
++	.table      = "filter",
++	.targetsize = sizeof(struct xt_chaos_info),
++	.hooks      = (1 << NF_IP_LOCAL_IN) | (1 << NF_IP_FORWARD) |
++	              (1 << NF_IP_LOCAL_OUT),
++	.family     = AF_INET,
++	.me         = THIS_MODULE,
++};
++
++static int __init xt_chaos_init(void)
++{
++	int ret = -EINVAL;
++
++	xm_tcp = xt_request_find_match(AF_INET, "tcp", 0);
++	if(xm_tcp == NULL) {
++		printk(KERN_WARNING PFX "Error: Could not find or load "
++		       "\"tcp\" match\n");
++		return -EINVAL;
++	}
++
++	xt_reject = xt_request_find_target(AF_INET, "REJECT", 0);
++	if(xt_reject == NULL) {
++		printk(KERN_WARNING PFX "Error: Could not find or load "
++		       "\"REJECT\" target\n");
++		goto out2;
++	}
++
++	xt_tarpit   = xt_request_find_target(AF_INET, "TARPIT", 0);
++	have_tarpit = xt_tarpit != NULL;
++	if(!have_tarpit)
++		printk(KERN_WARNING PFX "Warning: Could not find or load "
++		       "\"TARPIT\" target\n");
++
++	xt_delude   = xt_request_find_target(AF_INET, "DELUDE", 0);
++	have_delude = xt_delude != NULL;
++	if(!have_delude)
++		printk(KERN_WARNING PFX "Warning: Could not find or load "
++		       "\"DELUDE\" target\n");
++
++	if((ret = xt_register_target(&xt_chaos_info)) != 0) {
++		printk(KERN_WARNING PFX "xt_register_target returned "
++		       "error %d\n", ret);
++		goto out3;
++	}
++
++	return 0;
++
++ out3:
++	if(have_delude)
++		module_put(xt_delude->me);
++	if(have_tarpit)
++		module_put(xt_tarpit->me);
++	module_put(xt_reject->me);
++ out2:
++	module_put(xm_tcp->me);
++	return ret;
++}
++
++static void __exit xt_chaos_exit(void)
++{
++	xt_unregister_target(&xt_chaos_info);
++	module_put(xm_tcp->me);
++	module_put(xt_reject->me);
++	if(have_delude)
++		module_put(xt_delude->me);
++	if(have_tarpit)
++		module_put(xt_tarpit->me);
++	return;
++}
++
++module_init(xt_chaos_init);
++module_exit(xt_chaos_exit);
++MODULE_AUTHOR("Jan Engelhardt <jengelh@gmx.de>");
++MODULE_DESCRIPTION("netfilter CHAOS target");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("ipt_CHAOS");
+diff -urN linux-2.6.21.1.old/net/netfilter/xt_DELUDE.c linux-2.6.21.1.dev/net/netfilter/xt_DELUDE.c
+--- linux-2.6.21.1.old/net/netfilter/xt_DELUDE.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/netfilter/xt_DELUDE.c	2007-05-26 20:40:11.004183528 +0200
+@@ -0,0 +1,288 @@
++/*
++	DELUDE target
++	Copyright © Jan Engelhardt <jengelh [at] gmx de>, 2007
++
++	Based upon linux-2.6.18.5/net/ipv4/netfilter/ipt_REJECT.c:
++	(C) 1999-2001 Paul `Rusty' Russell
++	(C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
++
++	xt_DELUDE acts like REJECT, but does reply with SYN-ACK on SYN.
++
++	This program is free software; you can redistribute it and/or modify
++	it under the terms of the GNU General Public License version 2 as
++	published by the Free Software Foundation.
++*/
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <linux/random.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/icmp.h>
++#include <net/icmp.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <net/route.h>
++#include <net/dst.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#ifdef CONFIG_BRIDGE_NETFILTER
++#	include <linux/netfilter_bridge.h>
++#endif
++#include <linux/netfilter/oot_trans.h>
++#define PFX KBUILD_MODNAME ": "
++
++static inline struct rtable *route_reverse(struct sk_buff *skb,
++					   struct tcphdr *tcph, int hook)
++{
++	struct iphdr *iph = ip_hdr(skb);
++	struct dst_entry *odst;
++	struct flowi fl = {};
++	struct rtable *rt;
++
++	/* We don't require ip forwarding to be enabled to be able to
++	 * send a RST reply for bridged traffic. */
++	if (hook != NF_IP_FORWARD
++#ifdef CONFIG_BRIDGE_NETFILTER
++	    || (skb->nf_bridge && skb->nf_bridge->mask & BRNF_BRIDGED)
++#endif
++	   ) {
++		fl.nl_u.ip4_u.daddr = iph->saddr;
++		if (hook == NF_IP_LOCAL_IN)
++			fl.nl_u.ip4_u.saddr = iph->daddr;
++		fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
++
++		if (ip_route_output_key(&rt, &fl) != 0)
++			return NULL;
++	} else {
++		/* non-local src, find valid iif to satisfy
++		 * rp-filter when calling ip_route_input. */
++		fl.nl_u.ip4_u.daddr = iph->daddr;
++		if (ip_route_output_key(&rt, &fl) != 0)
++			return NULL;
++
++		odst = skb->dst;
++		if (ip_route_input(skb, iph->saddr, iph->daddr,
++		                   RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
++			dst_release(&rt->u.dst);
++			return NULL;
++		}
++		dst_release(&rt->u.dst);
++		rt = (struct rtable *)skb->dst;
++		skb->dst = odst;
++
++		fl.nl_u.ip4_u.daddr = iph->saddr;
++		fl.nl_u.ip4_u.saddr = iph->daddr;
++		fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
++	}
++
++	if (rt->u.dst.error) {
++		dst_release(&rt->u.dst);
++		return NULL;
++	}
++
++	fl.proto = IPPROTO_TCP;
++	fl.fl_ip_sport = tcph->dest;
++	fl.fl_ip_dport = tcph->source;
++
++	xfrm_lookup((struct dst_entry **)&rt, &fl, NULL, 0);
++
++	return rt;
++}
++
++static void send_reset(struct sk_buff *oldskb, int hook)
++{
++	struct sk_buff *nskb;
++	struct iphdr *iph = ip_hdr(oldskb);
++	struct tcphdr _otcph, *oth, *tcph;
++	__be16 tmp_port;
++	__be32 tmp_addr;
++	int needs_ack;
++	unsigned int addr_type;
++
++	/* IP header checks: fragment. */
++	if (iph->frag_off & htons(IP_OFFSET))
++		return;
++
++	oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
++				 sizeof(_otcph), &_otcph);
++	if (oth == NULL)
++		return;
++
++	/* No RST for RST. */
++	if (oth->rst)
++		return;
++
++	/* Check checksum */
++	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
++		return;
++
++	/* We need a linear, writeable skb.  We also need to expand
++	   headroom in case hh_len of incoming interface < hh_len of
++	   outgoing interface */
++	nskb = skb_copy_expand(oldskb, LL_MAX_HEADER, skb_tailroom(oldskb),
++			       GFP_ATOMIC);
++	if (!nskb)
++		return;
++
++	/* This packet will not be the same as the other: clear nf fields */
++	nf_reset(nskb);
++	nskb->nfmark = 0;
++	skb_init_secmark(nskb);
++
++	skb_shinfo(nskb)->gso_size = 0;
++	skb_shinfo(nskb)->gso_segs = 0;
++	skb_shinfo(nskb)->gso_type = 0;
++
++	tcph = tcp_hdr(nskb);
++
++	/* Swap source and dest */
++	tmp_addr = ip_hdr(nskb)->saddr;
++	ip_hdr(nskb)->saddr = ip_hdr(nskb)->daddr;
++	ip_hdr(nskb)->daddr = tmp_addr;
++	tmp_port = tcph->source;
++	tcph->source = tcph->dest;
++	tcph->dest = tmp_port;
++
++	/* Truncate to length (no data) */
++	tcph->doff = sizeof(struct tcphdr)/4;
++	skb_trim(nskb, ip_hdrlen(nskb) + sizeof(struct tcphdr));
++	ip_hdr(nskb)->tot_len = htons(nskb->len);
++
++	if(oth->syn && !oth->ack && !oth->rst && !oth->fin) {
++		/* DELUDE essential part */
++		tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
++		                oldskb->len - ip_hdrlen(oldskb) -
++		                (oth->doff << 2));
++		tcph->seq     = htonl(secure_tcp_sequence_number(
++		                ip_hdr(nskb)->saddr, ip_hdr(nskb)->daddr,
++			        tcph->source, tcph->dest));
++		tcph->ack     = 1;
++	} else {
++		if(!tcph->ack) {
++			needs_ack = 1;
++			tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin
++					      + oldskb->len - ip_hdrlen(oldskb)
++					      - (oth->doff<<2));
++			tcph->seq = 0;
++		} else {
++			needs_ack = 0;
++			tcph->seq = oth->ack_seq;
++			tcph->ack_seq = 0;
++		}
++
++		/* Reset flags */
++		((u_int8_t *)tcph)[13] = 0;
++		tcph->rst = 1;
++		tcph->ack = needs_ack;
++	}
++
++
++	tcph->window = 0;
++	tcph->urg_ptr = 0;
++
++	/* Adjust TCP checksum */
++	tcph->check = 0;
++	tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr),
++				   ip_hdr(nskb)->saddr,
++				   ip_hdr(nskb)->daddr,
++				   csum_partial((char *)tcph,
++						sizeof(struct tcphdr), 0));
++
++	/* Set DF, id = 0 */
++	ip_hdr(nskb)->frag_off = htons(IP_DF);
++	ip_hdr(nskb)->id = 0;
++
++	addr_type = RTN_UNSPEC;
++	if (hook != NF_IP_FORWARD
++#ifdef CONFIG_BRIDGE_NETFILTER
++	    || (nskb->nf_bridge && nskb->nf_bridge->mask & BRNF_BRIDGED)
++#endif
++	   )
++		addr_type = RTN_LOCAL;
++
++	if (ip_route_me_harder(&nskb, addr_type))
++		goto free_nskb;
++
++	nskb->ip_summed = CHECKSUM_NONE;
++
++	/* Adjust IP TTL */
++	ip_hdr(nskb)->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT);
++
++	/* Adjust IP checksum */
++	ip_hdr(nskb)->check = 0;
++	ip_hdr(nskb)->check = ip_fast_csum((unsigned char *)ip_hdr(nskb),
++					   ip_hdr(nskb)->ihl);
++
++	/* "Never happens" */
++	if (nskb->len > dst_mtu(nskb->dst))
++		goto free_nskb;
++
++	nf_ct_attach(nskb, oldskb);
++
++	NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
++		dst_output);
++	return;
++
++ free_nskb:
++	kfree_skb(nskb);
++}
++
++static unsigned int xt_delude_target(struct sk_buff **pskb,
++    const struct net_device *in, const struct net_device *out,
++    unsigned int hooknum, const struct xt_target *target, const void *targinfo
++#ifdef HAVE_TARGUSERINFO
++    ,
++    void *userinfo
++#endif
++    )
++{
++	/* WARNING: This code causes reentry within iptables.
++	   This means that the iptables jump stack is now crap.  We
++	   must return an absolute verdict. --RR */
++	send_reset(*pskb, hooknum);
++	return NF_DROP;
++}
++
++static int xt_delude_check(const char *tablename, const void *e_void,
++    const struct xt_target *target, void *targinfo,
++#ifdef HAVE_TARGINFOSIZE
++    unsigned int targinfosize,
++#endif
++    unsigned int hook_mask)
++{
++	if(hook_mask & ~((1 << NF_IP_LOCAL_IN) | (1 << NF_IP_FORWARD))) {
++		printk(KERN_WARNING PFX "DELUDE may not be used in chains "
++		       "other than INPUT and FORWARD\n");
++		return 0;
++	}
++	return 1;
++}
++
++static struct xt_target xt_delude_info = {
++	.name       = "DELUDE",
++	.target     = xt_delude_target,
++	.checkentry = xt_delude_check,
++	.table      = "filter",
++	.hooks      = (1 << NF_IP_LOCAL_IN) | (1 << NF_IP_FORWARD) |
++	              (1 << NF_IP_LOCAL_OUT),
++	.proto      = IPPROTO_TCP,
++	.family     = AF_INET,
++	.me         = THIS_MODULE,
++};
++
++static int __init xt_delude_init(void)
++{
++	return xt_register_target(&xt_delude_info);
++}
++
++static void __exit xt_delude_exit(void)
++{
++	xt_unregister_target(&xt_delude_info);
++}
++
++module_init(xt_delude_init);
++module_exit(xt_delude_exit);
++MODULE_AUTHOR("Jan Engelhardt <jengelh@gmx.de>");
++MODULE_DESCRIPTION("netfilter DELUDE target");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("ipt_DELUDE");
+diff -urN linux-2.6.21.1.old/net/netfilter/xt_portscan.c linux-2.6.21.1.dev/net/netfilter/xt_portscan.c
+--- linux-2.6.21.1.old/net/netfilter/xt_portscan.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/netfilter/xt_portscan.c	2007-05-26 20:40:11.004183528 +0200
+@@ -0,0 +1,272 @@
++/*
++	portscan match for netfilter
++
++	Written by Jan Engelhardt, 2006 - 2007
++	This program is free software; you can redistribute it and/or modify
++	it under the terms of the GNU General Public License version 2 as
++	published by the Free Software Foundation.
++*/
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/skbuff.h>
++#include <linux/stat.h>
++#include <linux/tcp.h>
++#include <linux/types.h>
++#include <linux/version.h>
++#include <linux/netfilter/x_tables.h>
++#include <linux/netfilter/xt_tcpudp.h>
++#include <linux/netfilter/oot_conntrack.h>
++#include <linux/netfilter/xt_portscan.h>
++#include <linux/netfilter/oot_trans.h>
++#define PFX KBUILD_MODNAME ": "
++
++enum {
++	TCP_FLAGS_ALL3 = TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_SYN,
++	TCP_FLAGS_ALL4 = TCP_FLAGS_ALL3 | TCP_FLAG_ACK,
++	TCP_FLAGS_ALL6 = TCP_FLAGS_ALL4 | TCP_FLAG_PSH | TCP_FLAG_URG,
++};
++
++/* Module parameters */
++static unsigned int
++	connmark_mask = ~0,
++	packet_mask   = ~0,
++	mark_seen     = 0x9,
++	mark_synrcv   = 0x1,
++	mark_closed   = 0x2,
++	mark_synscan  = 0x3,
++	mark_estab1   = 0x4,
++	mark_estab2   = 0x5,
++	mark_cnscan   = 0x6,
++	mark_grscan   = 0x7,
++	mark_valid    = 0x8;
++
++module_param(connmark_mask, uint, S_IRUGO | S_IWUSR);
++module_param(packet_mask,   uint, S_IRUGO | S_IWUSR);
++module_param(mark_seen,     uint, S_IRUGO | S_IWUSR);
++module_param(mark_synrcv,   uint, S_IRUGO | S_IWUSR);
++module_param(mark_closed,   uint, S_IRUGO | S_IWUSR);
++module_param(mark_synscan,  uint, S_IRUGO | S_IWUSR);
++module_param(mark_estab1,   uint, S_IRUGO | S_IWUSR);
++module_param(mark_estab2,   uint, S_IRUGO | S_IWUSR);
++module_param(mark_cnscan,   uint, S_IRUGO | S_IWUSR);
++module_param(mark_grscan,   uint, S_IRUGO | S_IWUSR);
++module_param(mark_valid,    uint, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(connmark_mask, "only set specified bits in connection mark");
++MODULE_PARM_DESC(packet_mask,   "only set specified bits in packet mark");
++MODULE_PARM_DESC(mark_seen,     "nfmark value for packet-seen state");
++MODULE_PARM_DESC(mark_synrcv,   "connmark value for SYN Received state");
++MODULE_PARM_DESC(mark_closed,   "connmark value for closed state");
++MODULE_PARM_DESC(mark_synscan,  "connmark value for SYN Scan state");
++MODULE_PARM_DESC(mark_estab1,   "connmark value for Established-1 state");
++MODULE_PARM_DESC(mark_estab2,   "connmark value for Established-2 state");
++MODULE_PARM_DESC(mark_cnscan,   "connmark value for Connect Scan state");
++MODULE_PARM_DESC(mark_grscan,   "connmark value for Grab Scan state");
++MODULE_PARM_DESC(mark_valid,    "connmark value for Valid state");
++
++/* TCP flag functions */
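++/* Each helper below tests the TCP flag word against one of the masks
++ * above: ALL3 = FIN|RST|SYN, ALL4 adds ACK, ALL6 adds PSH and URG, so
++ * e.g. tflg_syn() only matches a SYN with no other tracked flag set. */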
++static inline int tflg_ack4(const struct tcphdr *th)
++{
++	return (tcp_flag_word(th) & TCP_FLAGS_ALL4) == TCP_FLAG_ACK;
++}
++
++static inline int tflg_ack6(const struct tcphdr *th)
++{
++	return (tcp_flag_word(th) & TCP_FLAGS_ALL6) == TCP_FLAG_ACK;
++}
++
++static inline int tflg_fin(const struct tcphdr *th)
++{
++	return (tcp_flag_word(th) & TCP_FLAGS_ALL3) == TCP_FLAG_FIN;
++}
++
++static inline int tflg_rst(const struct tcphdr *th)
++{
++	return (tcp_flag_word(th) & TCP_FLAGS_ALL3) == TCP_FLAG_RST;
++}
++
++static inline int tflg_rstack(const struct tcphdr *th)
++{
++	return (tcp_flag_word(th) & TCP_FLAGS_ALL4) ==
++	       (TCP_FLAG_ACK | TCP_FLAG_RST);
++}
++
++static inline int tflg_syn(const struct tcphdr *th)
++{
++	return (tcp_flag_word(th) & TCP_FLAGS_ALL4) == TCP_FLAG_SYN;
++}
++
++static inline int tflg_synack(const struct tcphdr *th)
++{
++	return (tcp_flag_word(th) & TCP_FLAGS_ALL4) ==
++	       (TCP_FLAG_SYN | TCP_FLAG_ACK);
++}
++
++/* portscan functions */
++static inline int xt_portscan_stealth(const struct tcphdr *th)
++{
++	/*
++	 * "Connection refused" replies to our own probes must not be matched.
++	 */
++	if(tflg_rstack(th))
++		return 0;
++
++	if(tflg_rst(th) && printk_ratelimit()) {
++		printk(KERN_WARNING PFX "Warning: Pure RST received\n");
++		return 0;
++	}
++
++	/*
++	 * -p tcp ! --syn -m conntrack --ctstate INVALID: Looking for non-start
++	 * packets that are not associated with any connection -- this will
++	 * match most scan types (NULL, XMAS, FIN) and ridiculous flag
++	 * combinations (SYN-RST, SYN-FIN, SYN-FIN-RST, FIN-RST, etc.).
++	 */
++	return !tflg_syn(th);
++}
++
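++/*
++ * Connection-mark state machine: given the current connmark, conntrack
++ * state and TCP flags of this packet, return the next mark (SYN
++ * received, closed, established, connect/grab scan or valid).
++ */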
++static inline int xt_portscan_full(int mark, enum ip_conntrack_info ctstate,
++    int loopback, const struct tcphdr *tcph, int payload_len)
++{
++	if(mark == mark_estab2) {
++		/*
++		 * -m connmark --mark $ESTAB2
++		 */
++		if(tflg_ack4(tcph) && payload_len == 0)
++			return mark; /* keep mark */
++		else if(tflg_rst(tcph) || tflg_fin(tcph))
++			return mark_grscan;
++		else
++			return mark_valid;
++	} else if(mark == mark_estab1) {
++		/*
++		 * -m connmark --mark $ESTAB1
++		 */
++		if(tflg_rst(tcph) || tflg_fin(tcph))
++			return mark_cnscan;
++		else if(!loopback && tflg_ack4(tcph) && payload_len == 0)
++			return mark_estab2;
++		else
++			return mark_valid;
++	} else if(mark == mark_synrcv) {
++		/*
++		 * -m connmark --mark $SYN
++		 */
++		if(loopback && tflg_synack(tcph))
++			return mark; /* keep mark */
++		else if(loopback && tflg_rstack(tcph))
++			return mark_closed;
++		else if(tflg_ack6(tcph))
++			return mark_estab1;
++		else
++			return mark_synscan;
++	} else if(ctstate == IP_CT_NEW && tflg_syn(tcph)) {
++		/*
++		 * -p tcp --syn --ctstate NEW
++		 */
++		return mark_synrcv;
++	}
++	return mark;
++}
++
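++/*
++ * Match function: run the state machine above to update the connection
++ * mark, tag the packet as seen, and match if the resulting connmark is
++ * one of the scan states selected in the rule.
++ */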
++static int xt_portscan_match(const struct sk_buff *skb,
++    const struct net_device *in, const struct net_device *out,
++    const struct xt_match *match, const void *matchinfo, int offset,
++    unsigned int protoff, int *hotdrop)
++{
++	const struct xt_portscan_info *info = matchinfo;
++	enum ip_conntrack_info ctstate;
++	struct nf_conn *ctdata;
++	const struct tcphdr *tcph;
++	struct tcphdr tcph_buf;
++
++	tcph = skb_header_pointer(skb, protoff, sizeof(tcph_buf), &tcph_buf);
++	if(tcph == NULL)
++		return 0;
++
++	/* Check for invalid packets: -m conntrack --ctstate INVALID */
++	if((ctdata = nf_ct_get(skb, &ctstate)) == NULL) {
++		if(info->match_stealth)
++			return xt_portscan_stealth(tcph);
++		/*
++		 * If @ctdata is NULL, we cannot match the other scan
++		 * types, return.
++		 */
++		return 0;
++	}
++
++	/*
++	 * If -m portscan was previously applied to this packet, the rules we
++	 * simulate must not be run through again. And for speedup, do not call
++	 * it either when the connection is already VALID.
++	 */
++	if((ctdata->mark & connmark_mask) == mark_valid ||
++	  (skb->nfmark & packet_mask) != mark_seen)
++	{
++		unsigned int n;
++		n = xt_portscan_full(ctdata->mark & connmark_mask, ctstate,
++		    in == &loopback_dev, tcph,
++		    skb->len - protoff - 4 * tcph->doff);
++
++		ctdata->mark = (ctdata->mark & ~connmark_mask) | n;
++		((struct sk_buff *)skb)->nfmark =
++			(skb->nfmark & ~packet_mask) | mark_seen;
++	}
++
++	return (info->match_syn && ctdata->mark == mark_synscan) ||
++	       (info->match_cn && ctdata->mark == mark_cnscan) ||
++	       (info->match_gr && ctdata->mark == mark_grscan);
++}
++
++static int xt_portscan_checkentry(const char *tablename, const void *entry,
++    const struct xt_match *match, void *matchinfo,
++#ifdef HAVE_MATCHINFOSIZE
++    unsigned int matchinfosize,
++#endif
++    unsigned int hook_mask)
++{
++	const struct xt_portscan_info *info = matchinfo;
++#ifdef HAVE_MATCHINFOSIZE
++	if(matchinfosize != XT_ALIGN(sizeof(struct xt_portscan_info))) {
++		printk(KERN_WARNING PFX "matchinfosize %u != %Zu\n",
++		       matchinfosize,
++		       XT_ALIGN(sizeof(struct xt_portscan_info)));
++		return 0;
++	}
++#endif
++	if((info->match_stealth & ~1) || (info->match_syn & ~1) ||
++	  (info->match_cn & ~1) || (info->match_gr & ~1)) {
++		printk(KERN_WARNING PFX "Invalid flags\n");
++		return 0;
++	}
++	return 1;
++}
++
++static struct xt_match xt_portscan = {
++	.name       = "portscan",
++	.match      = xt_portscan_match,
++	.checkentry = xt_portscan_checkentry,
++	.matchsize  = sizeof(struct xt_portscan_info),
++	.proto      = IPPROTO_TCP,
++	.family     = AF_INET,
++	.me         = THIS_MODULE,
++};
++
++static int __init xt_portscan_init(void)
++{
++	return xt_register_match(&xt_portscan);
++}
++
++static void __exit xt_portscan_exit(void)
++{
++	xt_unregister_match(&xt_portscan);
++	return;
++}
++
++module_init(xt_portscan_init);
++module_exit(xt_portscan_exit);
++MODULE_AUTHOR("Jan Engelhardt <jengelh@gmx.de>");
++MODULE_DESCRIPTION("netfilter portscan match module");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("ipt_portscan");
+diff -urN linux-2.6.22-rc3.old/drivers/char/random.c linux-2.6.22-rc3.dev/drivers/char/random.c
+--- linux-2.6.22-rc3.old/drivers/char/random.c	2007-05-26 03:55:14.000000000 +0100
++++ linux-2.6.22-rc3.dev/drivers/char/random.c	2007-05-29 11:21:53.000000000 +0100
+@@ -1557,6 +1557,8 @@
+ 	return seq;
+ }
+ 
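++/* Export so that modular code such as the DELUDE target above can use it. */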
++EXPORT_SYMBOL(secure_tcp_sequence_number);
++
+ /* Generate secure starting point for ephemeral IPV4 transport port search */
+ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
+ {
diff --git a/target/linux/generic-2.6/patches-2.6.22/180-netfilter_depends.patch b/target/linux/generic-2.6/patches-2.6.22/180-netfilter_depends.patch
new file mode 100644
index 0000000000..435027d68b
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/180-netfilter_depends.patch
@@ -0,0 +1,20 @@
+--- linux.old/net/netfilter/Kconfig	2007-05-26 20:05:39.270134976 +0200
++++ linux.dev/net/netfilter/Kconfig	2007-05-26 20:12:44.258526920 +0200
+@@ -133,7 +133,7 @@
+ 
+ config NF_CONNTRACK_H323
+ 	tristate "H.323 protocol support (EXPERIMENTAL)"
+-	depends on EXPERIMENTAL && NF_CONNTRACK && (IPV6 || IPV6=n)
++	depends on EXPERIMENTAL && NF_CONNTRACK
+ 	help
+ 	  H.323 is a VoIP signalling protocol from ITU-T. As one of the most
+ 	  important VoIP protocols, it is widely used by voice hardware and
+@@ -381,7 +381,7 @@
+ 
+ config NETFILTER_XT_TARGET_TCPMSS
+ 	tristate '"TCPMSS" target support'
+-	depends on NETFILTER_XTABLES && (IPV6 || IPV6=n)
++	depends on NETFILTER_XTABLES
+ 	---help---
+ 	  This option adds a `TCPMSS' target, which allows you to alter the
+ 	  MSS value of TCP SYN packets, to control the maximum size for that
diff --git a/target/linux/generic-2.6/patches-2.6.22/200-sched_esfq.patch b/target/linux/generic-2.6/patches-2.6.22/200-sched_esfq.patch
new file mode 100644
index 0000000000..9b096b347c
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/200-sched_esfq.patch
@@ -0,0 +1,793 @@
+diff -urN linux-2.6.21.1.old/include/linux/pkt_sched.h linux-2.6.21.1.dev/include/linux/pkt_sched.h
+--- linux-2.6.21.1.old/include/linux/pkt_sched.h	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/include/linux/pkt_sched.h	2007-05-26 20:43:12.530587320 +0200
+@@ -146,8 +146,40 @@
+  *
+  *	The only reason for this is efficiency, it is possible
+  *	to change these parameters in compile time.
++ *
++ *	If you need to play with these values, use esfq instead.
+  */
+ 
++/* ESFQ section */
++
++enum
++{
++        /* traditional */
++	TCA_SFQ_HASH_CLASSIC,
++	TCA_SFQ_HASH_DST,
++	TCA_SFQ_HASH_SRC,
++	TCA_SFQ_HASH_FWMARK,
++        /* direct */
++	TCA_SFQ_HASH_DSTDIR,
++	TCA_SFQ_HASH_SRCDIR,
++	TCA_SFQ_HASH_FWMARKDIR,
++	/* conntrack */
++	TCA_SFQ_HASH_CTORIGDST,
++	TCA_SFQ_HASH_CTORIGSRC,
++	TCA_SFQ_HASH_CTREPLDST,
++	TCA_SFQ_HASH_CTREPLSRC,
++};
++
++struct tc_esfq_qopt
++{
++	unsigned	quantum;	/* Bytes per round allocated to flow */
++	int		perturb_period;	/* Period of hash perturbation */
++	__u32		limit;		/* Maximal packets in queue */
++	unsigned	divisor;	/* Hash divisor  */
++	unsigned	flows;		/* Maximal number of flows  */
++	unsigned	hash_kind;	/* Hash function to use for flow identification */
++};
++
+ /* RED section */
+ 
+ enum
+diff -urN linux-2.6.21.1.old/net/sched/Kconfig linux-2.6.21.1.dev/net/sched/Kconfig
+--- linux-2.6.21.1.old/net/sched/Kconfig	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/sched/Kconfig	2007-05-26 20:43:12.572580936 +0200
+@@ -133,6 +133,26 @@
+ 	  To compile this code as a module, choose M here: the
+ 	  module will be called sch_sfq.
+ 
++config NET_SCH_ESFQ
++	tristate "Enhanced Stochastic Fairness Queueing (ESFQ)"
++	---help---
++	  Say Y here if you want to use the Enhanced Stochastic Fairness
++	  Queueing (ESFQ) packet scheduling algorithm for some of your network
++	  devices or as a leaf discipline for a classful qdisc such as HTB or
++	  CBQ (see the top of <file:net/sched/sch_esfq.c> for details and
++	  references to the SFQ algorithm).
++
++	  This is an enhanced SFQ version which allows you to control some
++	  hardcoded values in the SFQ scheduler.
++
++	  ESFQ also adds control of the hash function used to identify packet
++	  flows. The original SFQ discipline hashes by connection; ESFQ adds
++	  several other hashing methods, such as by src IP or by dst IP, which
++	  can be fairer to users in some networking situations.
++	  
++	  To compile this code as a module, choose M here: the
++	  module will be called sch_esfq.
++
+ config NET_SCH_TEQL
+ 	tristate "True Link Equalizer (TEQL)"
+ 	---help---
+diff -urN linux-2.6.21.1.old/net/sched/Makefile linux-2.6.21.1.dev/net/sched/Makefile
+--- linux-2.6.21.1.old/net/sched/Makefile	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/sched/Makefile	2007-05-26 20:43:12.577580176 +0200
+@@ -23,6 +23,7 @@
+ obj-$(CONFIG_NET_SCH_INGRESS)	+= sch_ingress.o 
+ obj-$(CONFIG_NET_SCH_DSMARK)	+= sch_dsmark.o
+ obj-$(CONFIG_NET_SCH_SFQ)	+= sch_sfq.o
++obj-$(CONFIG_NET_SCH_ESFQ)	+= sch_esfq.o
+ obj-$(CONFIG_NET_SCH_TBF)	+= sch_tbf.o
+ obj-$(CONFIG_NET_SCH_TEQL)	+= sch_teql.o
+ obj-$(CONFIG_NET_SCH_PRIO)	+= sch_prio.o
+diff -urN linux-2.6.21.1.old/net/sched/sch_esfq.c linux-2.6.21.1.dev/net/sched/sch_esfq.c
+--- linux-2.6.21.1.old/net/sched/sch_esfq.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/net/sched/sch_esfq.c	2007-05-26 20:43:12.578580024 +0200
+@@ -0,0 +1,704 @@
++/*
++ * net/sched/sch_esfq.c	Extended Stochastic Fairness Queueing discipline.
++ *
++ *		This program is free software; you can redistribute it and/or
++ *		modify it under the terms of the GNU General Public License
++ *		as published by the Free Software Foundation; either version
++ *		2 of the License, or (at your option) any later version.
++ *
++ * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
++ *
++ * Changes:	Alexander Atanasov, <alex@ssi.bg>
++ *		Added dynamic depth,limit,divisor,hash_kind options.
++ *		Added dst and src hashes.
++ *
++ * 		Alexander Clouter, <alex@digriz.org.uk>
++ *		Ported ESFQ to Linux 2.6.
++ *
++ * 		Corey Hickey, <bugfood-c@fatooh.org>
++ *		Maintenance of the Linux 2.6 port.
++ *		Added fwmark hash (thanks to Robert Kurjata).
++ *		Added direct hashing for src, dst, and fwmark.
++ *		Added usage of jhash.
++ *		
++ */
++
++#include <linux/module.h>
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <linux/bitops.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/jiffies.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/socket.h>
++#include <linux/sockios.h>
++#include <linux/in.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++#include <linux/if_ether.h>
++#include <linux/inet.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/notifier.h>
++#include <linux/init.h>
++#include <net/ip.h>
++#include <linux/ipv6.h>
++#include <net/route.h>
++#include <linux/skbuff.h>
++#include <net/sock.h>
++#include <net/pkt_sched.h>
++#include <linux/jhash.h>
++
++#ifdef CONFIG_NF_CONNTRACK_ENABLED
++#include <net/netfilter/nf_conntrack.h>
++#endif
++
++/*	Stochastic Fairness Queuing algorithm.
++	For more comments look at sch_sfq.c.
++	The difference is that you can change limit, depth,
++	hash table size and choose alternate hash types.
++	
++	classic:	same as in sch_sfq.c
++	dst:		destination IP address
++	src:		source IP address
++	fwmark:         netfilter mark value
++	dst_direct:
++	src_direct:
++	fwmark_direct:  direct hashing of the above sources
++	ctorigdst:	original destination IP address
++	ctorigsrc:	original source IP address
++	ctrepldst:	reply destination IP address
++	ctreplsrc:	reply source IP address
++	
++*/
++
++
++/* This type should contain at least SFQ_DEPTH*2 values */
++typedef unsigned int esfq_index;
++
++struct esfq_head
++{
++	esfq_index	next;
++	esfq_index	prev;
++};
++
++struct esfq_sched_data
++{
++/* Parameters */
++	int		perturb_period;
++	unsigned	quantum;	/* Allotment per round: MUST BE >= MTU */
++	int		limit;
++	unsigned	depth;
++	unsigned	hash_divisor;
++	unsigned	hash_kind;
++/* Variables */
++	struct timer_list perturb_timer;
++	int		perturbation;
++	esfq_index	tail;		/* Index of current slot in round */
++	esfq_index	max_depth;	/* Maximal depth */
++
++	esfq_index	*ht;			/* Hash table */
++	esfq_index	*next;			/* Active slots link */
++	short		*allot;			/* Current allotment per slot */
++	unsigned short	*hash;			/* Hash value indexed by slots */
++	struct sk_buff_head	*qs;		/* Slot queue */
++	struct esfq_head	*dep;		/* Linked list of slots, indexed by depth */
++	unsigned	dyn_min;	/* For dynamic divisor adjustment; minimum value seen */
++	unsigned	dyn_max;	/*                                 maximum value seen */
++	unsigned	dyn_range;	/*	        		   saved range */
++};
++
++/* This contains the info we will hash. */
++struct esfq_packet_info
++{
++	u32	proto;		/* protocol or port */
++	u32	src;		/* source from packet header */
++	u32	dst;		/* destination from packet header */
++	u32	ctorigsrc;	/* original source from conntrack */
++	u32	ctorigdst;	/* original destination from conntrack */
++	u32	ctreplsrc;	/* reply source from conntrack */
++	u32	ctrepldst;	/* reply destination from conntrack */
++	u32	mark;		/* netfilter mark (fwmark) */
++};
++
++/* Hash input values directly into the "nearest" slot, taking into account the
++ * range of input values seen. This is most useful when the hash table is at
++ * least as large as the range of possible values.
++ * Note: this functionality was added before the change to using jhash, and may
++ * no longer be useful. */
++static __inline__ unsigned esfq_hash_direct(struct esfq_sched_data *q, u32 h)
++{
++	/* adjust minimum and maximum */
++	if (h < q->dyn_min || h > q->dyn_max) {
++		q->dyn_min = h < q->dyn_min ? h : q->dyn_min;
++		q->dyn_max = h > q->dyn_max ? h : q->dyn_max;
++	
++		/* find new range */
++		if ((q->dyn_range = q->dyn_max - q->dyn_min) >= q->hash_divisor)
++			printk(KERN_WARNING "ESFQ: (direct hash) Input range %u is larger than hash "
++					"table. See ESFQ README for details.\n", q->dyn_range);
++	}
++	
++	/* hash input values into slot numbers */
++	if (q->dyn_min == q->dyn_max)
++		return 0; /* only one value seen; avoid division by 0 */
++	else
++		return (h - q->dyn_min) * (q->hash_divisor - 1) / q->dyn_range;
++}
++
++static __inline__ unsigned esfq_jhash_1word(struct esfq_sched_data *q,u32 a)
++{
++	return jhash_1word(a, q->perturbation) & (q->hash_divisor-1);
++}
++
++static __inline__ unsigned esfq_jhash_2words(struct esfq_sched_data *q, u32 a, u32 b)
++{
++	return jhash_2words(a, b, q->perturbation) & (q->hash_divisor-1);
++}
++
++static __inline__ unsigned esfq_jhash_3words(struct esfq_sched_data *q, u32 a, u32 b, u32 c)
++{
++	return jhash_3words(a, b, c, q->perturbation) & (q->hash_divisor-1);
++}
++
++
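++/*
++ * Collect the fields of interest (addresses, port/protocol word, fwmark
++ * and, with conntrack enabled, the original/reply tuples) and hash
++ * whichever of them the configured hash_kind selects.
++ */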
++static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb)
++{
++	struct esfq_packet_info info;
++#ifdef CONFIG_NF_CONNTRACK_ENABLED
++	enum ip_conntrack_info ctinfo;
++	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
++#endif
++	
++	switch (skb->protocol) {
++	case __constant_htons(ETH_P_IP):
++	{
++		struct iphdr *iph = ip_hdr(skb);
++		info.dst = iph->daddr;
++		info.src = iph->saddr;
++		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
++		    (iph->protocol == IPPROTO_TCP ||
++		     iph->protocol == IPPROTO_UDP ||
++		     iph->protocol == IPPROTO_SCTP ||
++		     iph->protocol == IPPROTO_DCCP ||
++		     iph->protocol == IPPROTO_ESP))
++			info.proto = *(((u32*)iph) + iph->ihl);
++		else
++			info.proto = iph->protocol;
++		break;
++	}
++	case __constant_htons(ETH_P_IPV6):
++	{
++		struct ipv6hdr *iph = ipv6_hdr(skb);
++		/* Hash ipv6 addresses into a u32. This isn't ideal,
++		 * but the code is simple. */
++		info.dst = jhash2(iph->daddr.s6_addr32, 4, q->perturbation);
++		info.src = jhash2(iph->saddr.s6_addr32, 4, q->perturbation);
++		if (iph->nexthdr == IPPROTO_TCP ||
++		    iph->nexthdr == IPPROTO_UDP ||
++		    iph->nexthdr == IPPROTO_SCTP ||
++		    iph->nexthdr == IPPROTO_DCCP ||
++		    iph->nexthdr == IPPROTO_ESP)
++			info.proto = *(u32*)&iph[1];
++		else
++			info.proto = iph->nexthdr;
++		break;
++	}
++	default:
++		info.dst   = (u32)(unsigned long)skb->dst;
++		info.src   = (u32)(unsigned long)skb->sk;
++		info.proto = skb->protocol;
++	}
++
++	info.mark = skb->mark;
++
++#ifdef CONFIG_NF_CONNTRACK_ENABLED
++	/* defaults if there is no conntrack info */
++	info.ctorigsrc = info.src;
++	info.ctorigdst = info.dst;
++	info.ctreplsrc = info.dst;
++	info.ctrepldst = info.src;
++	/* collect conntrack info */
++	if (ct && ct != &nf_conntrack_untracked) {
++		if (skb->protocol == __constant_htons(ETH_P_IP)) {
++			info.ctorigsrc = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
++			info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip;
++			info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip;
++			info.ctrepldst = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
++		}
++		else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
++			/* Again, hash ipv6 addresses into a single u32. */
++			info.ctorigsrc = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6, 4, q->perturbation);
++			info.ctorigdst = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6, 4, q->perturbation);
++			info.ctreplsrc = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6, 4, q->perturbation);
++			info.ctrepldst = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6, 4, q->perturbation);
++		}
++
++	}
++#endif
++
++	switch(q->hash_kind)
++	{
++	case TCA_SFQ_HASH_CLASSIC:
++		return esfq_jhash_3words(q, info.dst, info.src, info.proto);
++	case TCA_SFQ_HASH_DST:
++		return esfq_jhash_1word(q, info.dst);
++	case TCA_SFQ_HASH_DSTDIR:
++		return esfq_hash_direct(q, ntohl(info.dst));
++	case TCA_SFQ_HASH_SRC:
++		return esfq_jhash_1word(q, info.src);
++	case TCA_SFQ_HASH_SRCDIR:
++		return esfq_hash_direct(q, ntohl(info.src));
++	case TCA_SFQ_HASH_FWMARK:
++		return esfq_jhash_1word(q, info.mark);
++	case TCA_SFQ_HASH_FWMARKDIR:
++		return esfq_hash_direct(q, info.mark);
++#ifdef CONFIG_NF_CONNTRACK_ENABLED
++	case TCA_SFQ_HASH_CTORIGDST:
++		return esfq_jhash_1word(q, info.ctorigdst);
++	case TCA_SFQ_HASH_CTORIGSRC:
++		return esfq_jhash_1word(q, info.ctorigsrc);
++	case TCA_SFQ_HASH_CTREPLDST:
++		return esfq_jhash_1word(q, info.ctrepldst);
++	case TCA_SFQ_HASH_CTREPLSRC:
++		return esfq_jhash_1word(q, info.ctreplsrc);
++#endif
++	default:
++		if (net_ratelimit())
++			printk(KERN_WARNING "ESFQ: Unknown hash method. Falling back to classic.\n");
++	}
++	return esfq_jhash_3words(q, info.dst, info.src, info.proto);
++}
++
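++/*
++ * Active slots are kept on doubly linked lists indexed by their queue
++ * length (q->dep[qlen + depth]); esfq_drop() uses this to find a longest
++ * slot in O(1) when the qdisc overflows.
++ */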
++static inline void esfq_link(struct esfq_sched_data *q, esfq_index x)
++{
++	esfq_index p, n;
++	int d = q->qs[x].qlen + q->depth;
++
++	p = d;
++	n = q->dep[d].next;
++	q->dep[x].next = n;
++	q->dep[x].prev = p;
++	q->dep[p].next = q->dep[n].prev = x;
++}
++
++static inline void esfq_dec(struct esfq_sched_data *q, esfq_index x)
++{
++	esfq_index p, n;
++
++	n = q->dep[x].next;
++	p = q->dep[x].prev;
++	q->dep[p].next = n;
++	q->dep[n].prev = p;
++
++	if (n == p && q->max_depth == q->qs[x].qlen + 1)
++		q->max_depth--;
++
++	esfq_link(q, x);
++}
++
++static inline void esfq_inc(struct esfq_sched_data *q, esfq_index x)
++{
++	esfq_index p, n;
++	int d;
++
++	n = q->dep[x].next;
++	p = q->dep[x].prev;
++	q->dep[p].next = n;
++	q->dep[n].prev = p;
++	d = q->qs[x].qlen;
++	if (q->max_depth < d)
++		q->max_depth = d;
++
++	esfq_link(q, x);
++}
++
++static unsigned int esfq_drop(struct Qdisc *sch)
++{
++	struct esfq_sched_data *q = qdisc_priv(sch);
++	esfq_index d = q->max_depth;
++	struct sk_buff *skb;
++	unsigned int len;
++
++	/* Queue is full! Find the longest slot and
++	   drop a packet from it */
++
++	if (d > 1) {
++		esfq_index x = q->dep[d+q->depth].next;
++		skb = q->qs[x].prev;
++		len = skb->len;
++		__skb_unlink(skb, &q->qs[x]);
++		kfree_skb(skb);
++		esfq_dec(q, x);
++		sch->q.qlen--;
++		sch->qstats.drops++;
++		sch->qstats.backlog -= len;
++		return len;
++	}
++
++	if (d == 1) {
++		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
++		d = q->next[q->tail];
++		q->next[q->tail] = q->next[d];
++		q->allot[q->next[d]] += q->quantum;
++		skb = q->qs[d].prev;
++		len = skb->len;
++		__skb_unlink(skb, &q->qs[d]);
++		kfree_skb(skb);
++		esfq_dec(q, d);
++		sch->q.qlen--;
++		q->ht[q->hash[d]] = q->depth;
++		sch->qstats.drops++;
++		sch->qstats.backlog -= len;
++		return len;
++	}
++
++	return 0;
++}
++
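++/*
++ * Enqueue: hash the packet to a slot, append it there and, if the qdisc
++ * is now over its limit, drop from the longest slot and report
++ * congestion (NET_XMIT_CN).
++ */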
++static int
++esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
++{
++	struct esfq_sched_data *q = qdisc_priv(sch);
++	unsigned hash = esfq_hash(q, skb);
++	unsigned depth = q->depth;
++	esfq_index x;
++
++	x = q->ht[hash];
++	if (x == depth) {
++		q->ht[hash] = x = q->dep[depth].next;
++		q->hash[x] = hash;
++	}
++	sch->qstats.backlog += skb->len;
++	__skb_queue_tail(&q->qs[x], skb);
++	esfq_inc(q, x);
++	if (q->qs[x].qlen == 1) {		/* The flow is new */
++		if (q->tail == depth) {	/* It is the first flow */
++			q->tail = x;
++			q->next[x] = x;
++			q->allot[x] = q->quantum;
++		} else {
++			q->next[x] = q->next[q->tail];
++			q->next[q->tail] = x;
++			q->tail = x;
++		}
++	}
++	if (++sch->q.qlen < q->limit-1) {
++		sch->bstats.bytes += skb->len;
++		sch->bstats.packets++;
++		return 0;
++	}
++
++	esfq_drop(sch);
++	return NET_XMIT_CN;
++}
++
++static int
++esfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
++{
++	struct esfq_sched_data *q = qdisc_priv(sch);
++	unsigned hash = esfq_hash(q, skb);
++	unsigned depth = q->depth;
++	esfq_index x;
++
++	x = q->ht[hash];
++	if (x == depth) {
++		q->ht[hash] = x = q->dep[depth].next;
++		q->hash[x] = hash;
++	}
++	sch->qstats.backlog += skb->len;
++	__skb_queue_head(&q->qs[x], skb);
++	esfq_inc(q, x);
++	if (q->qs[x].qlen == 1) {		/* The flow is new */
++		if (q->tail == depth) {	/* It is the first flow */
++			q->tail = x;
++			q->next[x] = x;
++			q->allot[x] = q->quantum;
++		} else {
++			q->next[x] = q->next[q->tail];
++			q->next[q->tail] = x;
++			q->tail = x;
++		}
++	}
++	if (++sch->q.qlen < q->limit - 1) {
++		sch->qstats.requeues++;
++		return 0;
++	}
++
++	sch->qstats.drops++;
++	esfq_drop(sch);
++	return NET_XMIT_CN;
++}
++
++
++
++
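++/*
++ * Round-robin dequeue: take a packet from the slot at the head of the
++ * active list and charge its length against that slot's allotment; when
++ * the allotment runs out (or the slot empties) the round advances and
++ * the next slot's allotment is topped up by one quantum.
++ */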
++static struct sk_buff *
++esfq_dequeue(struct Qdisc* sch)
++{
++	struct esfq_sched_data *q = qdisc_priv(sch);
++	struct sk_buff *skb;
++	unsigned depth = q->depth;
++	esfq_index a, old_a;
++
++	/* No active slots */
++	if (q->tail == depth)
++		return NULL;
++	
++	a = old_a = q->next[q->tail];
++	
++	/* Grab packet */
++	skb = __skb_dequeue(&q->qs[a]);
++	esfq_dec(q, a);
++	sch->q.qlen--;
++	sch->qstats.backlog -= skb->len;
++	
++	/* Is the slot empty? */
++	if (q->qs[a].qlen == 0) {
++		q->ht[q->hash[a]] = depth;
++		a = q->next[a];
++		if (a == old_a) {
++			q->tail = depth;
++			return skb;
++		}
++		q->next[q->tail] = a;
++		q->allot[a] += q->quantum;
++	} else if ((q->allot[a] -= skb->len) <= 0) {
++		q->tail = a;
++		a = q->next[a];
++		q->allot[a] += q->quantum;
++	}
++	
++	return skb;
++}
++
++static void
++esfq_reset(struct Qdisc* sch)
++{
++	struct sk_buff *skb;
++
++	while ((skb = esfq_dequeue(sch)) != NULL)
++		kfree_skb(skb);
++}
++
++static void esfq_perturbation(unsigned long arg)
++{
++	struct Qdisc *sch = (struct Qdisc*)arg;
++	struct esfq_sched_data *q = qdisc_priv(sch);
++
++	q->perturbation = net_random()&0x1F;
++
++	if (q->perturb_period) {
++		q->perturb_timer.expires = jiffies + q->perturb_period;
++		add_timer(&q->perturb_timer);
++	}
++}
++
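++/*
++ * Runtime reconfiguration: quantum, perturbation period, limit and hash
++ * kind may be changed; excess packets are dropped so the queue fits the
++ * new limit before the tree lock is released.
++ */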
++static int esfq_change(struct Qdisc *sch, struct rtattr *opt)
++{
++	struct esfq_sched_data *q = qdisc_priv(sch);
++	struct tc_esfq_qopt *ctl = RTA_DATA(opt);
++	int old_perturb = q->perturb_period;
++	
++	if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
++		return -EINVAL;
++	
++	sch_tree_lock(sch);
++	q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
++	q->perturb_period = ctl->perturb_period*HZ;
++//	q->hash_divisor = ctl->divisor;
++//	q->tail = q->limit = q->depth = ctl->flows;
++	
++	if (ctl->limit)
++		q->limit = min_t(u32, ctl->limit, q->depth);
++	
++	if (ctl->hash_kind) {
++		q->hash_kind = ctl->hash_kind;
++		if (q->hash_kind != TCA_SFQ_HASH_CLASSIC)
++			q->perturb_period = 0;
++	}
++	
++	// is sch_tree_lock enough to do this ?
++	while (sch->q.qlen >= q->limit-1)
++		esfq_drop(sch);
++	
++	if (old_perturb)
++		del_timer(&q->perturb_timer);
++	if (q->perturb_period) {
++		q->perturb_timer.expires = jiffies + q->perturb_period;
++		add_timer(&q->perturb_timer);
++	} else {
++		q->perturbation = 0;
++	}
++	sch_tree_unlock(sch);
++	return 0;
++}
++
++static int esfq_init(struct Qdisc *sch, struct rtattr *opt)
++{
++	struct esfq_sched_data *q = qdisc_priv(sch);
++	struct tc_esfq_qopt *ctl;
++	esfq_index p = ~0U/2;
++	int i;
++	
++	if (opt && opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
++		return -EINVAL;
++
++	init_timer(&q->perturb_timer);
++	q->perturb_timer.data = (unsigned long)sch;
++	q->perturb_timer.function = esfq_perturbation;
++	q->perturbation = 0;
++	q->hash_kind = TCA_SFQ_HASH_CLASSIC;
++	q->max_depth = 0;
++	q->dyn_min = ~0U; /* maximum value for this type */
++	q->dyn_max = 0;  /* dyn_min/dyn_max will be set properly upon first packet */
++	if (opt == NULL) {
++		q->quantum = psched_mtu(sch->dev);
++		q->perturb_period = 0;
++		q->hash_divisor = 1024;
++		q->tail = q->limit = q->depth = 128;
++		
++	} else {
++		ctl = RTA_DATA(opt);
++		q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
++		q->perturb_period = ctl->perturb_period*HZ;
++		q->hash_divisor = ctl->divisor ? : 1024;
++		q->tail = q->limit = q->depth = ctl->flows ? : 128;
++		
++		if ( q->depth > p - 1 )
++			return -EINVAL;
++		
++		if (ctl->limit)
++			q->limit = min_t(u32, ctl->limit, q->depth);
++		
++		if (ctl->hash_kind) {
++			q->hash_kind = ctl->hash_kind;
++		}
++		
++		if (q->perturb_period) {
++			q->perturb_timer.expires = jiffies + q->perturb_period;
++			add_timer(&q->perturb_timer);
++		}
++	}
++	
++	q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL);
++	if (!q->ht)
++		goto err_case;
++		
++	q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL);
++	if (!q->dep)
++		goto err_case;
++	q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL);
++	if (!q->next)
++		goto err_case;
++	
++	q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL);
++	if (!q->allot)
++		goto err_case;
++	q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL);
++	if (!q->hash)
++		goto err_case;
++	q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL);
++	if (!q->qs)
++		goto err_case;
++	
++	for (i=0; i< q->hash_divisor; i++)
++		q->ht[i] = q->depth;
++	for (i=0; i<q->depth; i++) {
++		skb_queue_head_init(&q->qs[i]);
++		q->dep[i+q->depth].next = i+q->depth;
++		q->dep[i+q->depth].prev = i+q->depth;
++	}
++	
++	for (i=0; i<q->depth; i++)
++		esfq_link(q, i);
++	return 0;
++err_case:
++	del_timer(&q->perturb_timer);
++	if (q->ht)
++		kfree(q->ht);
++	if (q->dep)
++		kfree(q->dep);
++	if (q->next)
++		kfree(q->next);
++	if (q->allot)
++		kfree(q->allot);
++	if (q->hash)
++		kfree(q->hash);
++	if (q->qs)
++		kfree(q->qs);
++	return -ENOBUFS;
++}
++
++static void esfq_destroy(struct Qdisc *sch)
++{
++	struct esfq_sched_data *q = qdisc_priv(sch);
++	del_timer(&q->perturb_timer);
++	if(q->ht)
++		kfree(q->ht);
++	if(q->dep)
++		kfree(q->dep);
++	if(q->next)
++		kfree(q->next);
++	if(q->allot)
++		kfree(q->allot);
++	if(q->hash)
++		kfree(q->hash);
++	if(q->qs)
++		kfree(q->qs);
++}
++
++static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb)
++{
++	struct esfq_sched_data *q = qdisc_priv(sch);
++	unsigned char	 *b = skb->tail;
++	struct tc_esfq_qopt opt;
++
++	opt.quantum = q->quantum;
++	opt.perturb_period = q->perturb_period/HZ;
++
++	opt.limit = q->limit;
++	opt.divisor = q->hash_divisor;
++	opt.flows = q->depth;
++	opt.hash_kind = q->hash_kind;
++
++	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
++
++	return skb->len;
++
++rtattr_failure:
++	skb_trim(skb, b - skb->data);
++	return -1;
++}
++
++static struct Qdisc_ops esfq_qdisc_ops =
++{
++	.next		=	NULL,
++	.cl_ops		=	NULL,
++	.id		=	"esfq",
++	.priv_size	=	sizeof(struct esfq_sched_data),
++	.enqueue	=	esfq_enqueue,
++	.dequeue	=	esfq_dequeue,
++	.requeue	=	esfq_requeue,
++	.drop		=	esfq_drop,
++	.init		=	esfq_init,
++	.reset		=	esfq_reset,
++	.destroy	=	esfq_destroy,
++	.change		=	NULL, /* esfq_change - needs more work */
++	.dump		=	esfq_dump,
++	.owner		=	THIS_MODULE,
++};
++
++static int __init esfq_module_init(void)
++{
++	return register_qdisc(&esfq_qdisc_ops);
++}
++static void __exit esfq_module_exit(void) 
++{
++	unregister_qdisc(&esfq_qdisc_ops);
++}
++module_init(esfq_module_init)
++module_exit(esfq_module_exit)
++MODULE_LICENSE("GPL");
diff --git a/target/linux/generic-2.6/patches-2.6.22/201-multiple_default_gateways.patch b/target/linux/generic-2.6/patches-2.6.22/201-multiple_default_gateways.patch
new file mode 100644
index 0000000000..313b1102f8
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/201-multiple_default_gateways.patch
@@ -0,0 +1,339 @@
+diff -urN linux-2.6.21.1.old/include/linux/rtnetlink.h linux-2.6.21.1.dev/include/linux/rtnetlink.h
+--- linux-2.6.21.1.old/include/linux/rtnetlink.h	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/include/linux/rtnetlink.h	2007-05-26 20:46:11.883321560 +0200
+@@ -293,6 +293,8 @@
+ #define RTNH_F_DEAD		1	/* Nexthop is dead (used by multipath)	*/
+ #define RTNH_F_PERVASIVE	2	/* Do recursive gateway lookup	*/
+ #define RTNH_F_ONLINK		4	/* Gateway is forced on link	*/
++#define RTNH_F_SUSPECT		8	/* We don't know the real state	*/
++#define RTNH_F_BADSTATE		(RTNH_F_DEAD | RTNH_F_SUSPECT)
+ 
+ /* Macros to handle hexthops */
+ 
+diff -urN linux-2.6.21.1.old/include/net/flow.h linux-2.6.21.1.dev/include/net/flow.h
+--- linux-2.6.21.1.old/include/net/flow.h	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/include/net/flow.h	2007-05-26 20:46:11.922315632 +0200
+@@ -19,6 +19,8 @@
+ 		struct {
+ 			__be32			daddr;
+ 			__be32			saddr;
++			__u32			lsrc;
++			__u32			gw;
+ 			__u8			tos;
+ 			__u8			scope;
+ 		} ip4_u;
+@@ -43,6 +45,8 @@
+ #define fl6_flowlabel	nl_u.ip6_u.flowlabel
+ #define fl4_dst		nl_u.ip4_u.daddr
+ #define fl4_src		nl_u.ip4_u.saddr
++#define fl4_lsrc        nl_u.ip4_u.lsrc
++#define fl4_gw		nl_u.ip4_u.gw
+ #define fl4_tos		nl_u.ip4_u.tos
+ #define fl4_scope	nl_u.ip4_u.scope
+ 
+diff -urN linux-2.6.21.1.old/net/ipv4/route.c linux-2.6.21.1.dev/net/ipv4/route.c
+--- linux-2.6.21.1.old/net/ipv4/route.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/net/ipv4/route.c	2007-05-26 20:46:11.967308792 +0200
+@@ -1208,6 +1208,7 @@
+ 
+ 				/* Gateway is different ... */
+ 				rt->rt_gateway		= new_gw;
++				if (rt->fl.fl4_gw) rt->fl.fl4_gw = new_gw;
+ 
+ 				/* Redirect received -> path was valid */
+ 				dst_confirm(&rth->u.dst);
+@@ -1643,6 +1644,7 @@
+ 	rth->fl.fl4_tos	= tos;
+ 	rth->fl.mark    = skb->mark;
+ 	rth->fl.fl4_src	= saddr;
++	rth->fl.fl4_lsrc = 0;
+ 	rth->rt_src	= saddr;
+ #ifdef CONFIG_NET_CLS_ROUTE
+ 	rth->u.dst.tclassid = itag;
+@@ -1653,6 +1655,7 @@
+ 	dev_hold(rth->u.dst.dev);
+ 	rth->idev	= in_dev_get(rth->u.dst.dev);
+ 	rth->fl.oif	= 0;
++	rth->fl.fl4_gw	= 0;
+ 	rth->rt_gateway	= daddr;
+ 	rth->rt_spec_dst= spec_dst;
+ 	rth->rt_type	= RTN_MULTICAST;
+@@ -1716,7 +1719,7 @@
+ static inline int __mkroute_input(struct sk_buff *skb,
+ 				  struct fib_result* res,
+ 				  struct in_device *in_dev,
+-				  __be32 daddr, __be32 saddr, u32 tos,
++				  __be32 daddr, __be32 saddr, u32 tos, u32 lsrc,
+ 				  struct rtable **result)
+ {
+ 
+@@ -1751,6 +1754,7 @@
+ 		flags |= RTCF_DIRECTSRC;
+ 
+ 	if (out_dev == in_dev && err && !(flags & (RTCF_NAT | RTCF_MASQ)) &&
++	    !lsrc &&
+ 	    (IN_DEV_SHARED_MEDIA(out_dev) ||
+ 	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
+ 		flags |= RTCF_DOREDIRECT;
+@@ -1788,6 +1792,7 @@
+ 	rth->fl.mark    = skb->mark;
+ 	rth->fl.fl4_src	= saddr;
+ 	rth->rt_src	= saddr;
++	rth->fl.fl4_lsrc	= lsrc;
+ 	rth->rt_gateway	= daddr;
+ 	rth->rt_iif 	=
+ 		rth->fl.iif	= in_dev->dev->ifindex;
+@@ -1795,6 +1800,7 @@
+ 	dev_hold(rth->u.dst.dev);
+ 	rth->idev	= in_dev_get(rth->u.dst.dev);
+ 	rth->fl.oif 	= 0;
++	rth->fl.fl4_gw	= 0;
+ 	rth->rt_spec_dst= spec_dst;
+ 
+ 	rth->u.dst.input = ip_forward;
+@@ -1816,19 +1822,21 @@
+ 				       struct fib_result* res,
+ 				       const struct flowi *fl,
+ 				       struct in_device *in_dev,
+-				       __be32 daddr, __be32 saddr, u32 tos)
++				       __be32 daddr, __be32 saddr, u32 tos, 
++				       u32 lsrc)
+ {
+ 	struct rtable* rth = NULL;
+ 	int err;
+ 	unsigned hash;
+ 
++	fib_select_default(fl, res);
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+-	if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
++	if (res->fi && res->fi->fib_nhs > 1)
+ 		fib_select_multipath(fl, res);
+ #endif
+ 
+ 	/* create a routing cache entry */
+-	err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
++	err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, lsrc, &rth);
+ 	if (err)
+ 		return err;
+ 
+@@ -1841,7 +1849,8 @@
+ 				   struct fib_result* res,
+ 				   const struct flowi *fl,
+ 				   struct in_device *in_dev,
+-				   __be32 daddr, __be32 saddr, u32 tos)
++				   __be32 daddr, __be32 saddr, u32 tos, 
++				   u32 lsrc)
+ {
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
+ 	struct rtable* rth = NULL, *rtres;
+@@ -1857,7 +1866,7 @@
+ 	/* distinguish between multipath and singlepath */
+ 	if (hopcount < 2)
+ 		return ip_mkroute_input_def(skb, res, fl, in_dev, daddr,
+-					    saddr, tos);
++					    saddr, tos, 0);
+ 
+ 	/* add all alternatives to the routing cache */
+ 	for (hop = 0; hop < hopcount; hop++) {
+@@ -1869,7 +1878,7 @@
+ 
+ 		/* create a routing cache entry */
+ 		err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos,
+-				      &rth);
++				      0, &rth);
+ 		if (err)
+ 			return err;
+ 
+@@ -1889,7 +1898,7 @@
+ 	skb->dst = &rtres->u.dst;
+ 	return err;
+ #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED  */
+-	return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, saddr, tos);
++	return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, saddr, tos, lsrc);
+ #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED  */
+ }
+ 
+@@ -1905,18 +1914,18 @@
+  */
+ 
+ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+-			       u8 tos, struct net_device *dev)
++			       u8 tos, struct net_device *dev, u32 lsrc)
+ {
+ 	struct fib_result res;
+ 	struct in_device *in_dev = in_dev_get(dev);
+ 	struct flowi fl = { .nl_u = { .ip4_u =
+ 				      { .daddr = daddr,
+-					.saddr = saddr,
++					.saddr = lsrc ? : saddr,
+ 					.tos = tos,
+ 					.scope = RT_SCOPE_UNIVERSE,
+ 				      } },
+ 			    .mark = skb->mark,
+-			    .iif = dev->ifindex };
++			    .iif = lsrc? loopback_dev.ifindex : dev->ifindex };
+ 	unsigned	flags = 0;
+ 	u32		itag = 0;
+ 	struct rtable * rth;
+@@ -1949,6 +1958,12 @@
+ 	if (BADCLASS(daddr) || ZERONET(daddr) || LOOPBACK(daddr))
+ 		goto martian_destination;
+ 
++	if (lsrc) {
++		if (MULTICAST(lsrc) || BADCLASS(lsrc) ||
++		    ZERONET(lsrc) || LOOPBACK(lsrc))
++			goto e_inval;
++	}
++
+ 	/*
+ 	 *	Now we are ready to route packet.
+ 	 */
+@@ -1958,6 +1973,10 @@
+ 		goto no_route;
+ 	}
+ 	free_res = 1;
++	if (lsrc && res.type != RTN_UNICAST && res.type != RTN_NAT)
++		goto e_inval;
++	fl.iif = dev->ifindex;
++	fl.fl4_src = saddr;
+ 
+ 	RT_CACHE_STAT_INC(in_slow_tot);
+ 
+@@ -1982,7 +2001,7 @@
+ 	if (res.type != RTN_UNICAST)
+ 		goto martian_destination;
+ 
+-	err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
++	err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos, lsrc);
+ 	if (err == -ENOBUFS)
+ 		goto e_nobufs;
+ 	if (err == -EINVAL)
+@@ -1997,6 +2016,8 @@
+ brd_input:
+ 	if (skb->protocol != htons(ETH_P_IP))
+ 		goto e_inval;
++	if (lsrc)
++		goto e_inval;
+ 
+ 	if (ZERONET(saddr))
+ 		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
+@@ -2037,6 +2058,7 @@
+ 	rth->u.dst.dev	= &loopback_dev;
+ 	dev_hold(rth->u.dst.dev);
+ 	rth->idev	= in_dev_get(rth->u.dst.dev);
++	rth->fl.fl4_gw	= 0;
+ 	rth->rt_gateway	= daddr;
+ 	rth->rt_spec_dst= spec_dst;
+ 	rth->u.dst.input= ip_local_deliver;
+@@ -2086,8 +2108,9 @@
+ 	goto e_inval;
+ }
+ 
+-int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+-		   u8 tos, struct net_device *dev)
++static inline int
++ip_route_input_cached(struct sk_buff *skb, __be32 daddr, __be32 saddr,
++		   u8 tos, struct net_device *dev, u32 lsrc)
+ {
+ 	struct rtable * rth;
+ 	unsigned	hash;
+@@ -2102,6 +2125,7 @@
+ 		if (rth->fl.fl4_dst == daddr &&
+ 		    rth->fl.fl4_src == saddr &&
+ 		    rth->fl.iif == iif &&
++		    rth->fl.fl4_lsrc == lsrc &&
+ 		    rth->fl.oif == 0 &&
+ 		    rth->fl.mark == skb->mark &&
+ 		    rth->fl.fl4_tos == tos) {
+@@ -2148,7 +2172,19 @@
+ 		rcu_read_unlock();
+ 		return -EINVAL;
+ 	}
+-	return ip_route_input_slow(skb, daddr, saddr, tos, dev);
++	return ip_route_input_slow(skb, daddr, saddr, tos, dev, lsrc);
++}
++
++int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
++		   u8 tos, struct net_device *dev)
++{
++	return ip_route_input_cached(skb, daddr, saddr, tos, dev, 0);
++}
++
++int ip_route_input_lookup(struct sk_buff *skb, u32 daddr, u32 saddr,
++			  u8 tos, struct net_device *dev, u32 lsrc)
++{
++	return ip_route_input_cached(skb, daddr, saddr, tos, dev, lsrc);
+ }
+ 
+ static inline int __mkroute_output(struct rtable **result,
+@@ -2227,6 +2263,7 @@
+ 	rth->fl.fl4_tos	= tos;
+ 	rth->fl.fl4_src	= oldflp->fl4_src;
+ 	rth->fl.oif	= oldflp->oif;
++	rth->fl.fl4_gw  = oldflp->fl4_gw;
+ 	rth->fl.mark    = oldflp->mark;
+ 	rth->rt_dst	= fl->fl4_dst;
+ 	rth->rt_src	= fl->fl4_src;
+@@ -2367,6 +2404,7 @@
+ 	struct flowi fl = { .nl_u = { .ip4_u =
+ 				      { .daddr = oldflp->fl4_dst,
+ 					.saddr = oldflp->fl4_src,
++					.gw = oldflp->fl4_gw,
+ 					.tos = tos & IPTOS_RT_MASK,
+ 					.scope = ((tos & RTO_ONLINK) ?
+ 						  RT_SCOPE_LINK :
+@@ -2470,6 +2508,7 @@
+ 		dev_out = &loopback_dev;
+ 		dev_hold(dev_out);
+ 		fl.oif = loopback_dev.ifindex;
++		fl.fl4_gw = 0;
+ 		res.type = RTN_LOCAL;
+ 		flags |= RTCF_LOCAL;
+ 		goto make_route;
+@@ -2477,7 +2516,7 @@
+ 
+ 	if (fib_lookup(&fl, &res)) {
+ 		res.fi = NULL;
+-		if (oldflp->oif) {
++		if (oldflp->oif && dev_out->flags & IFF_UP) {
+ 			/* Apparently, routing tables are wrong. Assume,
+ 			   that the destination is on link.
+ 
+@@ -2517,6 +2556,7 @@
+ 		dev_out = &loopback_dev;
+ 		dev_hold(dev_out);
+ 		fl.oif = dev_out->ifindex;
++		fl.fl4_gw = 0;
+ 		if (res.fi)
+ 			fib_info_put(res.fi);
+ 		res.fi = NULL;
+@@ -2524,13 +2564,12 @@
+ 		goto make_route;
+ 	}
+ 
++	if (res.type == RTN_UNICAST)
++		fib_select_default(&fl, &res);
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+-	if (res.fi->fib_nhs > 1 && fl.oif == 0)
++	if (res.fi->fib_nhs > 1)
+ 		fib_select_multipath(&fl, &res);
+-	else
+ #endif
+-	if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
+-		fib_select_default(&fl, &res);
+ 
+ 	if (!fl.fl4_src)
+ 		fl.fl4_src = FIB_RES_PREFSRC(res);
+@@ -2567,6 +2606,7 @@
+ 		    rth->fl.fl4_src == flp->fl4_src &&
+ 		    rth->fl.iif == 0 &&
+ 		    rth->fl.oif == flp->oif &&
++		    rth->fl.fl4_gw == flp->fl4_gw &&
+ 		    rth->fl.mark == flp->mark &&
+ 		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
+ 			    (IPTOS_RT_MASK | RTO_ONLINK))) {
+@@ -3271,3 +3311,4 @@
+ EXPORT_SYMBOL(__ip_select_ident);
+ EXPORT_SYMBOL(ip_route_input);
+ EXPORT_SYMBOL(ip_route_output_key);
++EXPORT_SYMBOL(ip_route_input_lookup);
diff --git a/target/linux/generic-2.6/patches-2.6.22/202-mips-freestanding.patch b/target/linux/generic-2.6/patches-2.6.22/202-mips-freestanding.patch
new file mode 100644
index 0000000000..cfc7b01dcf
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/202-mips-freestanding.patch
@@ -0,0 +1,13 @@
+diff -urN linux-2.6.21.1.old/arch/mips/Makefile linux-2.6.21.1.dev/arch/mips/Makefile
+--- linux-2.6.21.1.old/arch/mips/Makefile	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/arch/mips/Makefile	2007-05-26 20:49:09.358341256 +0200
+@@ -589,6 +589,9 @@
+ core-$(CONFIG_TOSHIBA_RBTX4938) += arch/mips/tx4938/common/
+ load-$(CONFIG_TOSHIBA_RBTX4938) += 0xffffffff80100000
+ 
++# temporary until string.h is fixed
++cflags-y += -ffreestanding
++
+ cflags-y			+= -Iinclude/asm-mips/mach-generic
+ drivers-$(CONFIG_PCI)		+= arch/mips/pci/
+ 
diff --git a/target/linux/generic-2.6/patches-2.6.22/204-jffs2_eofdetect.patch b/target/linux/generic-2.6/patches-2.6.22/204-jffs2_eofdetect.patch
new file mode 100644
index 0000000000..b96e574e13
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/204-jffs2_eofdetect.patch
@@ -0,0 +1,58 @@
+diff -urN linux-2.6.21.1.old/fs/jffs2/build.c linux-2.6.21.1.dev/fs/jffs2/build.c
+--- linux-2.6.21.1.old/fs/jffs2/build.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/fs/jffs2/build.c	2007-05-26 20:52:04.425726968 +0200
+@@ -105,6 +105,17 @@
+ 	dbg_fsbuild("scanned flash completely\n");
+ 	jffs2_dbg_dump_block_lists_nolock(c);
+ 
++	if (c->flags & (1 << 7)) {
++		printk("%s(): unlocking the mtd device... ", __func__);
++		if (c->mtd->unlock)
++			c->mtd->unlock(c->mtd, 0, c->mtd->size);
++		printk("done.\n");
++		
++		printk("%s(): erasing all blocks after the end marker... ", __func__);
++		jffs2_erase_pending_blocks(c, -1);
++		printk("done.\n");
++	}
++
+ 	dbg_fsbuild("pass 1 starting\n");
+ 	c->flags |= JFFS2_SB_FLAG_BUILDING;
+ 	/* Now scan the directory tree, increasing nlink according to every dirent found. */
+diff -urN linux-2.6.21.1.old/fs/jffs2/scan.c linux-2.6.21.1.dev/fs/jffs2/scan.c
+--- linux-2.6.21.1.old/fs/jffs2/scan.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/fs/jffs2/scan.c	2007-05-26 20:52:04.491716936 +0200
+@@ -142,9 +142,12 @@
+ 
+ 		/* reset summary info for next eraseblock scan */
+ 		jffs2_sum_reset_collected(s);
+-
+-		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+-						buf_size, s);
++		
++		if (c->flags & (1 << 7))
++			ret = BLK_STATE_ALLFF;
++		else
++			ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
++							buf_size, s);
+ 
+ 		if (ret < 0)
+ 			goto out;
+@@ -545,6 +548,17 @@
+ 			return err;
+ 	}
+ 
++	if ((buf[0] == 0xde) &&
++		(buf[1] == 0xad) &&
++		(buf[2] == 0xc0) &&
++		(buf[3] == 0xde)) {
++		/* end of filesystem. erase everything after this point */
++		printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset);
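++		/* flag bit 7 records that the marker was seen; build.c checks it
++		 * to unlock the mtd device and erase the remaining blocks */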
++		c->flags |= (1 << 7);
++
++		return BLK_STATE_ALLFF;
++	}
++	
+ 	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
+ 	ofs = 0;
+ 
diff --git a/target/linux/generic-2.6/patches-2.6.22/207-powerpc_asm_segment_h.patch b/target/linux/generic-2.6/patches-2.6.22/207-powerpc_asm_segment_h.patch
new file mode 100644
index 0000000000..4640c9f4e4
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/207-powerpc_asm_segment_h.patch
@@ -0,0 +1,10 @@
+diff -urN linux-2.6.21.1.old/include/asm-powerpc/segment.h linux-2.6.21.1.dev/include/asm-powerpc/segment.h
+--- linux-2.6.21.1.old/include/asm-powerpc/segment.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/include/asm-powerpc/segment.h	2007-05-26 20:55:12.738099136 +0200
+@@ -0,0 +1,6 @@
++#ifndef _ASM_SEGMENT_H
++#define _ASM_SEGMENT_H
++
++/* Only here because we have some old header files that expect it.. */
++
++#endif /* _ASM_SEGMENT_H */
diff --git a/target/linux/generic-2.6/patches-2.6.22/208-rtl8110sb_fix.patch b/target/linux/generic-2.6/patches-2.6.22/208-rtl8110sb_fix.patch
new file mode 100644
index 0000000000..d6705ddf41
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/208-rtl8110sb_fix.patch
@@ -0,0 +1,25 @@
+diff -urN linux-2.6.21.1.old/drivers/net/r8169.c linux-2.6.21.1.dev/drivers/net/r8169.c
+--- linux-2.6.21.1.old/drivers/net/r8169.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/drivers/net/r8169.c	2007-05-26 20:58:17.370030816 +0200
+@@ -494,7 +494,7 @@
+ #endif
+ 
+ static const u16 rtl8169_intr_mask =
+-	SYSErr | LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
++	LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
+ static const u16 rtl8169_napi_event =
+ 	RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
+ static const unsigned int rtl8169_rx_config =
+@@ -2640,10 +2640,12 @@
+ 		if (!(status & rtl8169_intr_mask))
+ 			break;
+ 
++#if 0
+ 		if (unlikely(status & SYSErr)) {
+ 			rtl8169_pcierr_interrupt(dev);
+ 			break;
+ 		}
++#endif
+ 
+ 		if (status & LinkChg)
+ 			rtl8169_check_link_status(dev, tp, ioaddr);
diff --git a/target/linux/generic-2.6/patches-2.6.22/209-mini_fo.patch b/target/linux/generic-2.6/patches-2.6.22/209-mini_fo.patch
new file mode 100644
index 0000000000..f542fd45c2
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/209-mini_fo.patch
@@ -0,0 +1,7807 @@
+diff -urN linux-2.6.21.1.old/fs/Kconfig linux-2.6.21.1.dev/fs/Kconfig
+--- linux-2.6.21.1.old/fs/Kconfig	2007-05-26 19:03:45.497714200 +0200
++++ linux-2.6.21.1.dev/fs/Kconfig	2007-05-26 21:01:26.154331240 +0200
+@@ -461,6 +461,9 @@
+ 	  This option will enlarge your kernel, but it allows debugging of
+ 	  ocfs2 filesystem issues.
+ 
++config MINI_FO
++	tristate "Mini fanout overlay filesystem"
++
+ config MINIX_FS
+ 	tristate "Minix fs support"
+ 	help
+diff -urN linux-2.6.21.1.old/fs/Makefile linux-2.6.21.1.dev/fs/Makefile
+--- linux-2.6.21.1.old/fs/Makefile	2007-05-26 19:03:45.497714200 +0200
++++ linux-2.6.21.1.dev/fs/Makefile	2007-05-26 21:01:26.160330328 +0200
+@@ -76,6 +76,7 @@
+ obj-$(CONFIG_RAMFS)		+= ramfs/
+ obj-$(CONFIG_HUGETLBFS)		+= hugetlbfs/
+ obj-$(CONFIG_CODA_FS)		+= coda/
++obj-$(CONFIG_MINI_FO)		+= mini_fo/
+ obj-$(CONFIG_MINIX_FS)		+= minix/
+ obj-$(CONFIG_FAT_FS)		+= fat/
+ obj-$(CONFIG_MSDOS_FS)		+= msdos/
+diff -urN linux-2.6.21.1.old/fs/mini_fo/aux.c linux-2.6.21.1.dev/fs/mini_fo/aux.c
+--- linux-2.6.21.1.old/fs/mini_fo/aux.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/aux.c	2007-05-26 21:01:26.160330328 +0200
+@@ -0,0 +1,580 @@
++/*
++ * Copyright (c) 1997-2003 Erez Zadok
++ * Copyright (c) 2001-2003 Stony Brook University
++ *
++ * For specific licensing information, see the COPYING file distributed with
++ * this package, or get one from ftp://ftp.filesystems.org/pub/fist/COPYING.
++ *
++ * This Copyright notice must be kept intact and distributed with all
++ * fistgen sources INCLUDING sources generated by fistgen.
++ */
++/*
++ * Copyright (C) 2004, 2005 Markus Klotzbuecher <mk@creamnet.de>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++/*
++ *  $Id$
++ */
++
++#ifdef HAVE_CONFIG_H
++# include <config.h>
++#endif
++
++#include "fist.h"
++#include "mini_fo.h"
++
++/* check if file exists in storage  */
++int exists_in_storage(dentry_t *dentry)
++{
++	check_mini_fo_dentry(dentry);
++	if(dtost(dentry) == MODIFIED || dtost(dentry) == CREATED || dtost(dentry) == DEL_REWRITTEN)
++		return 1;
++	return 0;	
++}
++
++/* check if dentry is in an existing state */
++int is_mini_fo_existant(dentry_t *dentry) 
++{
++	check_mini_fo_dentry(dentry);
++
++	if(dtost(dentry) == DELETED || dtost(dentry) == NON_EXISTANT)
++		return 0;
++	else
++		return 1;
++}
++
++/*
++ * This function will create a negative storage dentry for
++ * dentry, which is required by many create-like operations.
++ * It will create the storage structure if necessary.
++ */
++int get_neg_sto_dentry(dentry_t *dentry) 
++{
++	int err = 0;
++	unsigned int len;
++	const unsigned char *name;
++
++	if(!dentry ||
++	   !dtopd(dentry) ||
++	   !(dtost(dentry) == UNMODIFIED ||
++	     dtost(dentry) == NON_EXISTANT ||
++	     dtost(dentry) == DELETED)) {
++		printk(KERN_CRIT "mini_fo: get_neg_sto_dentry: invalid dentry passed.\n");
++		err = -1;
++		goto out;
++	}
++	/* Have we got a neg. dentry already? */
++	if(dtohd2(dentry)) {
++		err = 0;
++		goto out;
++	}
++	if(dtost(dentry->d_parent) == UNMODIFIED) {
++		/* build sto struct */
++		err = build_sto_structure(dentry->d_parent->d_parent, dentry->d_parent);
++		if(err || 
++		   dtost(dentry->d_parent) != MODIFIED) {
++			printk(KERN_CRIT "mini_fo: get_neg_sto_dentry: ERROR building sto structure.\n");
++			err = -1;
++			goto out;
++		}		
++	}
++
++	len = dentry->d_name.len;
++	name = dentry->d_name.name;
++	 
++	dtohd2(dentry) = 
++		lookup_one_len(name, dtohd2(dentry->d_parent), len);
++
++ out:
++	return err;
++}
++
++int check_mini_fo_dentry(dentry_t *dentry)
++{
++ 	ASSERT(dentry != NULL);
++	ASSERT(dtopd(dentry) != NULL);
++	ASSERT((dtohd(dentry) != NULL) || (dtohd2(dentry) != NULL));
++	       
++/* 	if(dtost(dentry) == MODIFIED) { */
++/* 		ASSERT(dentry->d_inode != NULL); */
++/* 		ASSERT(dtohd(dentry) != NULL); */
++/* 		ASSERT(dtohd(dentry)->d_inode != NULL); */
++/* 		ASSERT(dtohd2(dentry) != NULL); */
++/* 		ASSERT(dtohd2(dentry)->d_inode != NULL); */
++/* 	} */
++/* 	else if(dtost(dentry) == UNMODIFIED) { */
++/* 		ASSERT(dentry->d_inode != NULL); */
++/* 		ASSERT( */
++/* 	} */
++	return 0;	       
++}
++
++int check_mini_fo_file(file_t *file)
++{
++	ASSERT(file != NULL);
++	ASSERT(ftopd(file) != NULL);
++	ASSERT(file->f_dentry != NULL);
++	
++	/* violent checking, check depending of state and type 
++	 *	if(S_ISDIR(file->f_dentry->d_inode->i_mode)) {}
++	 */
++	ASSERT((ftohf(file) != NULL) || (ftohf2(file) != NULL));
++	return 0;
++}
++
++int check_mini_fo_inode(inode_t *inode)
++{
++	ASSERT(inode != NULL);
++	ASSERT(itopd(inode) != NULL);
++	ASSERT((itohi(inode) != NULL) || (itohi2(inode) != NULL));
++	return 0;
++}
++
++/* 
++ * will walk a base path as provided by get_mini_fo_bpath and return
++ * the (hopefully ;-) ) positive dentry of the renamed base dir.
++ *
++ * This does some work of path_init.
++ */
++dentry_t *bpath_walk(super_block_t *sb, char *bpath) 
++{
++	int err;
++	struct nameidata nd;
++
++	/* be paranoid */
++	if(!bpath || bpath[0] != '/') {
++		printk(KERN_CRIT "mini_fo: bpath_walk: Invalid string.\n");
++		return NULL;
++	}
++	if(!sb || !stopd(sb)) {
++		printk(KERN_CRIT "mini_fo: bpath_walk: Invalid sb.\n");
++		return NULL;
++	}
++	
++	/* setup nd as path_init does */
++	nd.last_type = LAST_ROOT; /* if there are only slashes... */
++	nd.flags = LOOKUP_FOLLOW;
++	/* fix this: how do I reach this lock? 
++	 * read_lock(&current->fs->lock); */
++	nd.mnt = mntget(stopd(sb)->hidden_mnt);
++	nd.dentry = dget(stopd(sb)->base_dir_dentry);
++	/* read_unlock(&current->fs->lock); */
++	
++	err = path_walk(bpath+1, &nd);
++
++	/* validate */
++	if (err || !nd.dentry || !nd.dentry->d_inode) {
++		printk(KERN_CRIT "mini_fo: bpath_walk: path_walk failed.\n");
++		return NULL;
++	}
++	return nd.dentry;
++}
++
++
++/* returns the full path of the basefile incl. its name */
++int get_mini_fo_bpath(dentry_t *dentry, char **bpath, int *bpath_len)
++{
++	char *buf_walker;
++	int len = 0;
++	dentry_t *sky_walker;
++	
++	if(!dentry || !dtohd(dentry)) {
++		printk(KERN_CRIT "mini_fo: get_mini_fo_bpath: invalid dentry passed.\n");
++		return -1;
++	}
++	sky_walker = dtohd(dentry);
++
++	do {
++		len += sky_walker->d_name.len + 1 ; /* 1 for '/' */
++		sky_walker = sky_walker->d_parent;
++	} while(sky_walker != stopd(dentry->d_inode->i_sb)->base_dir_dentry);
++
++	/* 1 to oil the loop */
++	*bpath = (char*)  kmalloc(len + 1, GFP_KERNEL);
++	if(!*bpath) {
++		printk(KERN_CRIT "mini_fo: get_mini_fo_bpath: out of mem.\n");
++		return -1;
++	}
++	buf_walker = *bpath+len; /* put it on last char */
++	*buf_walker = '\n';
++	sky_walker = dtohd(dentry);
++	
++	do {
++		buf_walker -= sky_walker->d_name.len;
++		strncpy(buf_walker, 
++			sky_walker->d_name.name, 
++			sky_walker->d_name.len);
++		*(--buf_walker) = '/';
++		sky_walker = sky_walker->d_parent;
++	} while(sky_walker != stopd(dentry->d_inode->i_sb)->base_dir_dentry);
++
++	/* bpath_len doesn't count newline! */
++	*bpath_len = len;
++ 	return 0;
++}
++
++int mini_fo_cp_cont(dentry_t *tgt_dentry, struct vfsmount *tgt_mnt,
++		    dentry_t *src_dentry, struct vfsmount *src_mnt)
++{
++	void *buf;
++	mm_segment_t old_fs;
++	file_t *tgt_file;
++	file_t *src_file;
++	int bytes, len, tmp, err;
++	err = 0;
++
++	if(!(tgt_dentry->d_inode && src_dentry->d_inode)) {
++		printk(KERN_CRIT "mini_fo_cp_cont: ERROR, neg. dentry passed.\n");
++		err = -EINVAL;
++		goto out;
++	}
++
++	dget(tgt_dentry);
++	dget(src_dentry);
++	mntget(tgt_mnt);
++	mntget(src_mnt);
++
++	/* open file write only */
++	tgt_file = dentry_open(tgt_dentry, tgt_mnt, 0x1);
++	if(!tgt_file || IS_ERR(tgt_file)) {
++		printk(KERN_CRIT "mini_fo_cp_cont: ERROR opening target file.\n");
++		err = PTR_ERR(tgt_file);
++		goto out_err;
++	}
++
++	/* open file read only */
++	src_file = dentry_open(src_dentry, src_mnt, 0x0);
++	if(!src_file || IS_ERR(src_file)) {
++		printk(KERN_CRIT "mini_fo_cp_cont: ERROR opening source file.\n");
++		err = PTR_ERR(src_file);
++
++		/* close target file */
++		fput(tgt_file);
++		goto out_err;
++	}
++
++	/* check if the filesystems support read and write respectively */
++	if(!src_file->f_op->read || !tgt_file->f_op->write) {
++		printk(KERN_CRIT "mini_fo_cp_cont: ERROR, no fs read or write support.\n");
++		err = -EPERM;
++		goto out_close;
++	}
++
++	/* allocate a page for transferring the data */
++	buf = (void *) __get_free_page(GFP_KERNEL);
++	if(!buf) {
++		printk(KERN_CRIT "mini_fo_cp_cont: ERROR, out of kernel mem.\n");
++		err = -ENOMEM;
++		goto out_close;
++	}
++
++	tgt_file->f_pos = 0;
++	src_file->f_pos = 0;
++
++	old_fs = get_fs();
++	set_fs(KERNEL_DS);
++
++	/* Doing this I assume that a read operation will return a full
++	 * buffer while there is still data to read, and a less than
++	 * full buffer when all data has been read.
++	 */
++	bytes = len = PAGE_SIZE;
++	while(bytes == len) {
++		bytes = src_file->f_op->read(src_file, buf, len, 
++					     &src_file->f_pos);
++		tmp = tgt_file->f_op->write(tgt_file, buf, bytes, 
++					    &tgt_file->f_pos);
++		if(tmp != bytes) {
++			printk(KERN_CRIT "mini_fo_cp_cont: ERROR writing.\n");
++			goto out_close_unset;
++		}
++	}
++
++	free_page((unsigned long) buf);
++	set_fs(old_fs);
++	fput(tgt_file);
++	fput(src_file);
++	goto out;
++
++ out_close_unset:
++	free_page((unsigned long) buf);
++	set_fs(old_fs);
++
++ out_close:
++	fput(tgt_file);
++	fput(src_file);
++
++ out_err:
++	dput(tgt_dentry);
++	dput(src_dentry);
++
++	/* mk: not sure if this needs to be done */
++	mntput(tgt_mnt);
++	mntput(src_mnt);
++
++ out:
++	return err;
++}
++
++/* mk:
++ * ndl (no-duplicate list) stuff
++ * This is used in mini_fo_readdir, to save the storage directory contents
++ * and later when reading base, match them against the list in order
++ * to avoid duplicates.
++ */
++
++/* add a file specified by name and len to the ndl
++ * Return values: 0 on success, <0 on failure.
++ */
++int ndl_add_entry(struct readdir_data *rd, const char *name, int len)
++{
++	struct ndl_entry *tmp_entry;
++
++	tmp_entry = (struct ndl_entry *) 
++		kmalloc(sizeof(struct ndl_entry), GFP_KERNEL);
++	if(!tmp_entry) {
++                printk(KERN_CRIT "mini_fo: ndl_add_entry: out of mem.\n");
++                return -ENOMEM;
++        }
++        tmp_entry->name = (char*) kmalloc(len, GFP_KERNEL);
++        if(!tmp_entry->name) {
++                printk(KERN_CRIT "mini_fo: ndl_add_entry: out of mem.\n");
++                kfree(tmp_entry);
++                return -ENOMEM;
++        }
++	strncpy(tmp_entry->name, name, len);
++        tmp_entry->len = len;
++
++        list_add(&tmp_entry->list, &rd->ndl_list);
++        rd->ndl_size++;
++        return 0;
++}
++
++/* delete all list entries and free memory */
++void ndl_put_list(struct readdir_data *rd)
++{
++	struct list_head *tmp;
++	struct ndl_entry *tmp_entry;
++
++	if(rd->ndl_size <= 0)
++		return;
++	while(!list_empty(&rd->ndl_list)) {
++		tmp = rd->ndl_list.next;
++                list_del(tmp);
++                tmp_entry = list_entry(tmp, struct ndl_entry, list);
++		kfree(tmp_entry->name);
++                kfree(tmp_entry);
++        }
++	rd->ndl_size = 0;
++}
++
++/* Check if a file specified by name and len is in the ndl
++ * Return value: 0 if not in list, 1 if file is found in ndl.
++ */
++int ndl_check_entry(struct readdir_data *rd, const char *name, int len)
++{
++	struct list_head *tmp;
++	struct ndl_entry *tmp_entry;
++
++	if(rd->ndl_size <= 0)
++		return 0;
++
++	list_for_each(tmp, &rd->ndl_list) {
++                tmp_entry = list_entry(tmp, struct ndl_entry, list);
++                if(tmp_entry->len != len)
++                        continue;
++                if(!strncmp(tmp_entry->name, name, len))
++                        return 1;
++        }
++        return 0;
++}
++
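++/*
++ * Illustrative sketch (not from the original mini_fo sources): how the
++ * ndl helpers above are combined when the storage and base listings of
++ * a MODIFIED directory are merged (compare mini_fo_readdir/filldir);
++ * ndl_put_list() is called once the merged listing has been copied to
++ * userspace.  Returns nonzero if the entry should not be listed.
++ */
++#if 0
++static int ndl_merge_example(struct readdir_data *rd, const char *name,
++			     int len, int reading_base)
++{
++	if (!reading_base)
++		/* pass 1, storage directory: remember every entry */
++		return ndl_add_entry(rd, name, len);
++
++	/* pass 2, base directory: skip entries already seen in storage */
++	return ndl_check_entry(rd, name, len);
++}
++#endif
++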
++/* mk:
++ * Recursive function to create the corresponding directories in the storage fs.
++ * The function will build the storage directories up to dentry.
++ */
++int build_sto_structure(dentry_t *dir, dentry_t *dentry)
++{
++	int err;
++	dentry_t *hidden_sto_dentry;
++	dentry_t *hidden_sto_dir_dentry;
++
++	if(dentry->d_parent != dir) {
++		printk(KERN_CRIT "mini_fo: build_sto_structure: invalid parameter or meta data corruption [1].\n");
++		return 1;
++	}
++
++       	if(dtost(dir) != MODIFIED) {
++		err = build_sto_structure(dir->d_parent, dentry->d_parent);
++		if(err)
++			return err;
++	}
++
++	/* ok, coming back again. */
++	check_mini_fo_dentry(dentry);
++	hidden_sto_dentry = dtohd2(dentry);
++
++	if(!hidden_sto_dentry) {
++		/*
++		 * This is the case after creating the first 
++		 * hidden_sto_dentry.
++		 * After one negative storage_dentry, all pointers to 
++		 * hidden_storage dentries are set to NULL. We need to
++		 * create the negative dentry before we create the storage
++		 * file.
++		 */
++		unsigned int len;
++		const unsigned char *name;
++		len = dtohd(dentry)->d_name.len;
++		name = dtohd(dentry)->d_name.name;
++		hidden_sto_dentry = lookup_one_len(name, dtohd2(dir), len);
++		dtohd2(dentry) = hidden_sto_dentry;
++	}
++
++	/* was:	hidden_sto_dir_dentry = lock_parent(hidden_sto_dentry); */
++	hidden_sto_dir_dentry = dget(hidden_sto_dentry->d_parent);
++
++	/* let's be safe: check this before taking the lock, so the error
++	 * path does not return with the storage dir mutex held */
++	if(dtohd2(dir) != hidden_sto_dir_dentry) {
++		printk(KERN_CRIT "mini_fo: build_sto_structure: invalid parameter or meta data corruption [2].\n");
++		dput(hidden_sto_dir_dentry);
++		return 1;
++	}
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_lock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++	down(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++
++	/* check for errors in lock_parent */
++	err = PTR_ERR(hidden_sto_dir_dentry);
++	if(IS_ERR(hidden_sto_dir_dentry)) {
++		printk(KERN_CRIT "mini_fo: build_sto_structure: lock_parent failed.\n");
++		return err;
++	}
++
++	err = vfs_mkdir(hidden_sto_dir_dentry->d_inode,
++			hidden_sto_dentry,
++			dir->d_inode->i_mode);
++
++	if(err) {
++		printk(KERN_CRIT "mini_fo: build_sto_structure: failed to create storage dir [1].\n");
++		/* was: unlock_dir(dir); */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_unlock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++		up(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++		dput(hidden_sto_dir_dentry);
++		return err;
++	}
++	
++	/* vfs_mkdir succeeded, verify the new storage dir has an inode */
++	if(!dtohd2(dentry)->d_inode) {
++		printk(KERN_CRIT "mini_fo: build_sto_structure: failed to create storage dir [2].\n");
++		/* was: unlock_dir(dir); */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_unlock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++		up(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++		dput(hidden_sto_dir_dentry);
++		return 1;
++	}
++
++	/* interpose the new inode and set new state */
++	itohi2(dentry->d_inode) = igrab(dtohd2(dentry)->d_inode);
++	dtopd(dentry)->state = MODIFIED;
++
++	/* initialize the wol list */
++	itopd(dentry->d_inode)->deleted_list_size = -1;
++	itopd(dentry->d_inode)->renamed_list_size = -1;
++	meta_build_lists(dentry);
++	
++	fist_copy_attr_all(dentry->d_inode, itohi2(dentry->d_inode));
++	fist_copy_attr_timesizes(dir->d_inode, 
++				 hidden_sto_dir_dentry->d_inode);
++	dir->d_inode->i_nlink++;
++	/* was: unlock_dir(hidden_sto_dir_dentry); */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_unlock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++	up(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++	dput(hidden_sto_dir_dentry);
++	return 0;
++}
++
++
++#if 0 /* unused */
++
++/*
++ * Read "len" bytes from "filename" into "buf".
++ * "buf" is in kernel space.
++ */
++int
++mini_fo_read_file(const char *filename, void *buf, int len)
++{
++    file_t *filp;
++    mm_segment_t oldfs;
++    int bytes;
++    /* Chroot? Maybe NULL isn't right here */
++    filp = filp_open(filename, O_RDONLY, 0);
++    if (!filp || IS_ERR(filp)) {
++	printk("mini_fo_read_file err %d\n", (int) PTR_ERR(filp));
++	return -1;  /* or do something else */
++    }
++
++    if (!filp->f_op->read)
++	return -2;  /* file(system) doesn't allow reads */
++
++    /* now read len bytes from offset 0 */
++    filp->f_pos = 0;		/* start offset */
++    oldfs = get_fs();
++    set_fs(KERNEL_DS);
++    bytes = filp->f_op->read(filp, buf, len, &filp->f_pos);
++    set_fs(oldfs);
++
++    /* close the file */
++    fput(filp);
++
++    return bytes;
++}
++
++
++
++/*
++ * Write "len" bytes from "buf" to "filename"
++ * "buf" is in kernel space.
++ */
++int
++mini_fo_write_file(const char *filename, void *buf, int len)
++{
++    file_t *filp;
++    mm_segment_t oldfs;
++    int bytes;
++				/* Chroot? Maybe NULL isn't right here */
++    filp = filp_open(filename, O_RDWR|O_CREAT, 0640);
++    if (!filp || IS_ERR(filp)) {
++	printk("mini_fo_write_file err %d\n", (int) PTR_ERR(filp));
++	return -1;  /* or do something else */
++    }
++
++    if (!filp->f_op->write)
++	return -2;  /* file(system) doesn't allow writes */
++
++    /* now write len bytes from offset 0 */
++    filp->f_pos = 0;		/* start offset */
++    oldfs = get_fs();
++    set_fs(KERNEL_DS);
++    bytes = filp->f_op->write(filp, buf, len, &filp->f_pos);
++    set_fs(oldfs);
++
++    /* close the file */
++    fput(filp);
++
++    return bytes;
++}
++
++#endif /* unused */
++
+diff -urN linux-2.6.21.1.old/fs/mini_fo/ChangeLog linux-2.6.21.1.dev/fs/mini_fo/ChangeLog
+--- linux-2.6.21.1.old/fs/mini_fo/ChangeLog	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/ChangeLog	2007-05-26 21:01:26.161330176 +0200
+@@ -0,0 +1,281 @@
++2006-01-24  Markus Klotzbuecher  <mk@mary.denx.de>
++
++	* Add tons of ugly ifdefs to Ed L. Cashin's mutex patch to
++          retain backwards compatibility.
++	
++2006-01-24  Ed L. Cashin <ecashin@coraid.com>
++
++	* Support for the new mutex infrastructure
++	(7892f2f48d165a34b0b8130c8a195dfd807b8cb6)
++
++2005-10-15  Markus Klotzbuecher  <mk@localhost.localdomain>
++
++	* Bugfix for a serious memory leak in mini_fo_follow_link.
++
++2005-09-21  Markus Klotzbuecher  <mk@mary>
++
++	* new release 0.6.1
++
++	* fix of a compiler warning due to changes in 2.6.13
++
++2005-09-21  Klaus Wenninger  <klaus.wenninger@siemens.com>
++
++	* file.c: readdir: fix for a bug that caused directory entries
++          to show up twice when using storage filesystems such as
++          minixfs or pramfs.
++
++2005-06-30  Eric Lammerts <eric@lammerts.org>
++
++	* fix for an oops when overwriting a binary that's being
++          executed.
++
++2005-06-09    <mk@mary>
++
++	* Renamed overlay to mini_fo-overlay.
++
++	* Added mini_fo-merge script to allow merging of storage and base
++	after making modifications.
++
++2005-05-22  root  <mk@mary>
++
++	* Added overlay script that makes it easy to mount mini_fo on top
++	of a given base directory
++
++2005-05-10    <mk@mary>
++
++	* inode.c: xattr functions return -EOPNOTSUPP instead of
++          -ENOTSUPP, which confuses "ls -l"
++
++	* Changed license from LGPL to GPL.
++
++2005-05-08  root  <mk@mary>
++
++	* Makefile: clean it up and added make install and make
++          uninstall.
++	
++2005-05-06    <mk@mary>
++
++	* merged devel branch back to main. [v0-6-0-pre3]
++
++	* removed unused files print.c and fist_ioctl. [devel-0-0-18]
++
++	* ioctl: removed fist_ioctl stuff, that is not needed for
++          now.
++
++2005-05-03    <mk@mary>
++
++	* file.c: simplified mini_fo_open and mini_fo_setattr using
++          new state changing functions. [devel-0-0-17]
++
++	* inode.c: Fixed getattr state bug (see below) in 2.4 function
++          mini_fo_inode_revalidate.
++
++	* inode.c: found another bug in mini_fo_getattr. States are not
++	  reliable in this function, as a file can be opened, unlinked and
++	  the getattr function called. This results in a deleted dentry
++	  with an inode. Fix is to ignore states and simply use the inode
++	  available.
++
++2005-04-29    <mk@mary>
++
++	* file.c: Bugfix and cleanup in fasync and fsync. [devel-0-0-16]
++
++	* file.c: do not use mini_fo_lock so the generic version is
++          used (I guess).
++
++	* inode.c: getattr, never call getattr on lower files, as this
++          will cause the inum to change.
++
++	* inode.c: rename_reg_file renamed to rename_nondir, as it
++          doesn't matter as long as it's not a dir. Removed all
++          rename_dev_file etc.
++
++	* tagged as devel-0-0-15
++
++	* inode.c: added support for choosing extended attribute
++          support at compile time via the XATTR define in mini_fo.h.
++
++	* inode.c: fixed mini_fo_getattr to use mini_fo inode and not
++          lower again, which avoids inode number changes that confused
++          rm again. This is the proper solution.
++
++2005-04-24    <mk@mary>
++
++	* all files: updated Copyright notice to 2005. [devel-0-0-14]
++
++	* inode.c: fixed mini_fo_getattr to not change the inode
++          number, even if lower files change.
++
++	* super.c: fixed a bug that caused deleted base files to show
++          up again suddenly after some time, or after creating a special
++          file. The problem was that in these situations the vfs calls
++          sync_sb_inodes, which called our mini_fo_put_inode. That
++          (wrongly) called __meta_put_lists, which nuked the lists
++          although the inode was going to continue its life. Moving
++          __meta_put_lists to mini_fo_clear_inode, where an inode is
++          really destroyed, solved the problem.
++
++
++2005-04-23    <mk@mary>
++
++	* state.c, aux.c: more cleaning up and
++          simplifications. [devel-0-0-13] 
++
++	* inode.c: implemented mini_fo_getattr, which was required for
++          2.6 because inode_revalidate has been removed there, and the
++	  old "du" bug returned.
++
++
++2005-04-20    <mk@mary>
++
++	* aux.c: get_neg_sto_dentry(): allow to be called for dentries
++          in state UNMODIFIED, NON_EXISTANT _and_ DELETED.
++
++2005-04-19    <mk@mary>
++
++	* Fixed a bug under 2.6 that caused files deleted via mini_fo
++          not to be deleted properly and therefore the fs filled up
++          until no memory was left. [devel-0-0-12]
++
++	* Added basic hard link support. This means that creating
++          hardlinks will work, but existing ones will be treated as
++          individual files. [devel-0-0-11]
++
++2005-04-17    <mk@mary>
++
++	* Bugfixes
++
++2005-04-13  root  <mk@mary>
++
++	* Added file state.c for the state transition
++          functions. Doesn't work very well yet, though...
++
++2005-04-12    <mk@mary>
++
++	* Porting to 2.6 started, which is easier than expected, also
++          due to Olivier's previous work.
++
++2005-04-08    <mk@mary>
++
++	* Fixed the bug that caused du to return invalid sizes of
++          directory trees. The problem was that
++          mini_fo_inode_revalidate didn't always copy the attributes
++          from the base inode properly.
++
++2005-04-01  Markus Klotzbuecher  <mk@chasey>
++
++	* Merged devel branch back to main trunk and updated the
++          RELEASE notes. This will be 0-6-0-pre1.
++
++2005-03-31  Markus Klotzbuecher  <mk@chasey>
++
++	* Fixed some bugs in rename_reg_file that only showed up in
++          the kernel compile test. Kernel compiles cleanly ontop of
++          mini_fo, now also make mrproper etc. work. Seems pretty stable.
++
++2005-03-28  Markus Klotzbuecher  <mk@chasey>
++
++	* Many, many directory renaming bugfixes and a lot of other
++          cleanup. Dir renaming seems to work relatively stably.
++
++2005-03-22  Markus Klotzbuecher  <mk@chasey>
++
++	* Finished implementing lightweight directory renaming. Some
++          basic testing indicates it works fine.
++	  Next is to implement testcases for the testsuite and confirm
++          everything is really working ok.
++
++2005-03-18  Markus Klotzbuecher  <mk@chasey>
++
++	* Finished implementing meta.c stuff required for directory
++          renaming.
++
++2005-03-17  Markus Klotzbuecher  <mk@chasey>
++
++	* Fixed all compile warnings + an extremely old bug that
++          somehow crept in while reworking the wol stuff to the META
++          system. Turning on -Werror again... :-)
++
++	* Fixed some bugs in the new rename_reg_file function.
++
++	* Rewrote mini_fo rename and split it into several
++          subfunctions that handle the different types
++          separately. Rewrote the regular file function as well, as it
++          was implemented somewhat inefficiently.
++
++2005-03-16  Markus Klotzbuecher  <mk@chasey>
++
++	* Implemented new META subsystem, removed old WOL stuff in favor
++	  of it.
++
++	* After some basic testing everything seems ok...
++
++2005-03-11  Markus Klotzbuecher  <mk@chasey>
++
++	* Renaming a non-regular file caused trouble because I always
++	  tried to copy the contents. Now I only do this for regular
++	  files. mini_fo_rename still isn't implemented properly, renaming
++	  of device files, symlinks etc. results in an empty regular file
++	  instead of the proper type.
++	
++	* Directory renaming suddenly works! What a surprise! I guess
++          this is because renaming is implemented as making a copy and
++          removing the original. Still this might not work
++          everywhere...
++
++2005-03-09  Markus Klotzbuecher  <mk@chasey>
++
++	* Bugfix, when a mini_fo directory that exists in storage
++  	  (state: MODIFIED, CREATED and DEL_REWRITTEN) is deleted, a
++  	  possibly existing WOL file contained in it needs to be
++  	  deleted too.
++
++	* Starting cleanup: defined state names in order to get rid of
++          the state numbers.
++
++2005-03-08  Markus Klotzbuecher  <mk@chasey>
++	
++	* Makefile fix, fist_ioctl was built against wrong sources if ARCH=um
++
++	* Fixed a bug in dentry.c, mini_fo_d_hash. In state 4 =
++          DEL_REWRITTEN the hash was calculated from the base dentry,
++          which was wrong and caused assertions in
++          __mini_fo_hidden_dentry to fail.
++
++2005-02-21    <mk@mary>
++
++	* Implemented directory deleting (inode.c)
++
++	* main.c: made mini_fo_parse_options a little more robust.
++
++2004-12-22    <mk@mary>
++
++	* Makefile cleanup and uml stuff, removed unnecessary files
++
++	* Created a new and hopefully more informative README
++
++	* CHANGELOG: created a new CHANGELOG and added old entries in reverse order
++
++
++2004-10-24 Gleb Natapov <gleb@nbase.co.il>
++
++	* Fix: owner and group were not correctly copied from base to
++          storage.
++
++
++2004-10-05 Gleb Natapov <gleb@nbase.co.il>
++
++	* Implementation of fsync, fasync and lock mini_fo functions.
++	
++
++2004-09-29 Bob Lee <bob@pantasys.com>
++
++	* Fix of a serious pointer bug
++	
++
++2004-09-28 Gleb Natapov <gleb@nbase.co.il>
++
++	* Implementation of mini_fo_mknod and mini_fo_rename, support
++          for device files.
++	
+diff -urN linux-2.6.21.1.old/fs/mini_fo/dentry.c linux-2.6.21.1.dev/fs/mini_fo/dentry.c
+--- linux-2.6.21.1.old/fs/mini_fo/dentry.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/dentry.c	2007-05-26 21:01:26.161330176 +0200
+@@ -0,0 +1,244 @@
++/*
++ * Copyright (c) 1997-2003 Erez Zadok
++ * Copyright (c) 2001-2003 Stony Brook University
++ *
++ * For specific licensing information, see the COPYING file distributed with
++ * this package, or get one from ftp://ftp.filesystems.org/pub/fist/COPYING.
++ *
++ * This Copyright notice must be kept intact and distributed with all
++ * fistgen sources INCLUDING sources generated by fistgen.
++ */
++/*
++ * Copyright (C) 2004, 2005 Markus Klotzbuecher <mk@creamnet.de>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++/*
++ *  $Id$
++ */
++
++#ifdef HAVE_CONFIG_H
++# include <config.h>
++#endif
++
++#include "fist.h"
++#include "mini_fo.h"
++
++/*
++ * THIS IS A BOOLEAN FUNCTION: returns 1 if valid, 0 otherwise.
++ */
++STATIC int
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++mini_fo_d_revalidate(dentry_t *dentry, struct nameidata *nd)
++#else
++mini_fo_d_revalidate(dentry_t *dentry, int flags)
++#endif
++{
++	int err1 = 1; /* valid = 1, invalid = 0 */
++	int err2 = 1;
++	dentry_t *hidden_dentry;
++	dentry_t *hidden_sto_dentry;
++
++
++	check_mini_fo_dentry(dentry);
++
++	hidden_dentry  = dtohd(dentry);
++	hidden_sto_dentry = dtohd2(dentry);
++
++	if(hidden_dentry &&
++	   hidden_dentry->d_op &&
++	   hidden_dentry->d_op->d_revalidate) {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++		err1 = hidden_dentry->d_op->d_revalidate(hidden_dentry, nd);
++#else
++		err1 = hidden_dentry->d_op->d_revalidate(hidden_dentry, flags);
++#endif
++	}
++	if(hidden_sto_dentry &&
++	   hidden_sto_dentry->d_op &&
++	   hidden_sto_dentry->d_op->d_revalidate) {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++		err2 = hidden_sto_dentry->d_op->d_revalidate(hidden_sto_dentry, 
++							     nd);
++#else
++		err2 = hidden_sto_dentry->d_op->d_revalidate(hidden_sto_dentry, 
++							     flags);
++#endif
++	}
++
++	/* mk: if one of the lower level dentries is valid,
++	 * the mini_fo dentry is too.
++	 */
++	return (err1 || err2);
++}
++
++
++STATIC int
++mini_fo_d_hash(dentry_t *dentry, qstr_t *name)
++{
++	int err = 0;
++	dentry_t *hidden_dentry;
++	dentry_t *hidden_sto_dentry;
++
++	/* hidden_dentry = mini_fo_hidden_dentry(dentry);
++	 * hidden_sto_dentry = mini_fo_hidden_sto_dentry(dentry); */
++
++	/* state 1, 3, 4, 5: build the hash for the storage dentry */
++	if((dtopd(dentry)->state == MODIFIED) ||
++	   (dtopd(dentry)->state == CREATED) ||
++	   (dtopd(dentry)->state == DEL_REWRITTEN) ||
++	   (dtopd(dentry)->state == DELETED)) {
++		hidden_sto_dentry = dtohd2(dentry);
++		if(hidden_sto_dentry &&
++		   hidden_sto_dentry->d_op &&
++		   hidden_sto_dentry->d_op->d_hash) {
++			err = hidden_sto_dentry->d_op->d_hash(hidden_sto_dentry, name);
++		}
++		goto out;
++	}
++	/* state 2: build the hash for the base dentry */
++	if(dtopd(dentry)->state == UNMODIFIED) {
++		hidden_dentry = dtohd(dentry);
++		if(hidden_dentry &&
++		   hidden_dentry->d_op &&
++		   hidden_dentry->d_op->d_hash) {
++			err = hidden_dentry->d_op->d_hash(hidden_dentry, name);
++		}
++		goto out;
++	}
++	/* state 6: build hash for the dentry that exists */
++	if(dtopd(dentry)->state == NON_EXISTANT) {
++		hidden_sto_dentry = dtohd2(dentry);
++		if(hidden_sto_dentry &&
++		   hidden_sto_dentry->d_op &&
++		   hidden_sto_dentry->d_op->d_hash) {
++			err = hidden_sto_dentry->d_op->d_hash(hidden_sto_dentry, name);
++			goto out;
++		}
++		hidden_dentry = dtohd(dentry);
++		if(hidden_dentry &&
++		   hidden_dentry->d_op &&
++		   hidden_dentry->d_op->d_hash) {
++			err = hidden_dentry->d_op->d_hash(hidden_dentry, name);
++			goto out;
++		}
++	}
++
++	printk(KERN_CRIT "mini_fo: d_hash: invalid state detected.\n");
++
++ out:
++	return err;
++}
++
++
++STATIC int
++mini_fo_d_compare(dentry_t *dentry, qstr_t *a, qstr_t *b)
++{
++	int err;
++	dentry_t *hidden_dentry=NULL;
++
++	/* hidden_dentry = mini_fo_hidden_dentry(dentry); */
++	if(dtohd2(dentry))
++		hidden_dentry = dtohd2(dentry);
++	else if(dtohd(dentry))
++		hidden_dentry = dtohd(dentry);
++
++	if (hidden_dentry && hidden_dentry->d_op && hidden_dentry->d_op->d_compare) {
++		err = hidden_dentry->d_op->d_compare(hidden_dentry, a, b);
++	} else {
++		err = ((a->len != b->len) || memcmp(a->name, b->name, b->len));
++	}
++
++	return err;
++}
++
++
++int
++mini_fo_d_delete(dentry_t *dentry)
++{
++	dentry_t *hidden_dentry;
++	dentry_t *hidden_sto_dentry;
++	int err = 0;
++
++	/* this could be a negative dentry, so check first */
++	if (!dtopd(dentry)) {
++		printk(KERN_CRIT "mini_fo_d_delete: negative dentry passed.\n");
++		goto out;
++	}
++	hidden_dentry = dtohd(dentry);
++	hidden_sto_dentry = dtohd2(dentry);
++
++	if(hidden_dentry) {
++		if(hidden_dentry->d_op &&
++		   hidden_dentry->d_op->d_delete) {
++			err = hidden_dentry->d_op->d_delete(hidden_dentry);
++		}
++	}
++	if(hidden_sto_dentry) {
++		if(hidden_sto_dentry->d_op &&
++		   hidden_sto_dentry->d_op->d_delete) {
++			err = hidden_sto_dentry->d_op->d_delete(hidden_sto_dentry);
++		}
++	}
++
++ out:
++	return err;
++}
++
++
++void
++mini_fo_d_release(dentry_t *dentry)
++{
++	dentry_t *hidden_dentry;
++	dentry_t *hidden_sto_dentry;
++
++
++	/* this could be a negative dentry, so check first */
++	if (!dtopd(dentry)) {
++		printk(KERN_CRIT "mini_fo_d_release: no private data.\n");
++		goto out;
++	}
++	hidden_dentry = dtohd(dentry);
++	hidden_sto_dentry = dtohd2(dentry);
++
++	if(hidden_dentry) {
++		/* decrement hidden dentry's counter and free its inode */
++		dput(hidden_dentry);
++	}
++	if(hidden_sto_dentry) {
++                /* decrement hidden dentry's counter and free its inode */
++		dput(hidden_sto_dentry);
++	}
++
++	/* free private data (mini_fo_dentry_info) here */
++	kfree(dtopd(dentry));
++	__dtopd(dentry) = NULL;	/* just to be safe */
++ out:
++	return;
++}
++
++
++/*
++ * we don't really need mini_fo_d_iput, because dentry_iput will call iput() if
++ * mini_fo_d_iput is not defined. We left this implemented for ease of
++ * tracing/debugging.
++ */
++void
++mini_fo_d_iput(dentry_t *dentry, inode_t *inode)
++{
++	iput(inode);
++}
++
++
++struct dentry_operations mini_fo_dops = {
++	d_revalidate:	mini_fo_d_revalidate,
++	d_hash:		mini_fo_d_hash,
++	d_compare:		mini_fo_d_compare,
++	d_release:		mini_fo_d_release,
++	d_delete:		mini_fo_d_delete,
++	d_iput:		mini_fo_d_iput,
++};
+diff -urN linux-2.6.21.1.old/fs/mini_fo/file.c linux-2.6.21.1.dev/fs/mini_fo/file.c
+--- linux-2.6.21.1.old/fs/mini_fo/file.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/file.c	2007-05-26 21:01:26.162330024 +0200
+@@ -0,0 +1,713 @@
++/*
++ * Copyright (c) 1997-2003 Erez Zadok
++ * Copyright (c) 2001-2003 Stony Brook University
++ *
++ * For specific licensing information, see the COPYING file distributed with
++ * this package, or get one from ftp://ftp.filesystems.org/pub/fist/COPYING.
++ *
++ * This Copyright notice must be kept intact and distributed with all
++ * fistgen sources INCLUDING sources generated by fistgen.
++ */
++/*
++ * Copyright (C) 2004, 2005 Markus Klotzbuecher <mk@creamnet.de>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++/*
++ *  $Id$
++ */
++
++#ifdef HAVE_CONFIG_H
++# include <config.h>
++#endif
++
++#include "fist.h"
++#include "mini_fo.h"
++#define ROUND_UP(x) (((x)+sizeof(long)-1) & ~(sizeof(long)-1))
++
++/*******************
++ * File Operations *
++ *******************/
++
++STATIC loff_t
++mini_fo_llseek(file_t *file, loff_t offset, int origin)
++{
++	loff_t err;
++	file_t *hidden_file = NULL;
++
++	if(S_ISDIR(file->f_dentry->d_inode->i_mode)) {
++		/* Check if trying to llseek from a directory */
++		err = -EISDIR;
++		goto out;
++	}
++	if (ftopd(file) != NULL) {
++		if(ftohf2(file)) {
++			hidden_file = ftohf2(file);
++		} else {
++			hidden_file = ftohf(file);
++		}
++	}
++
++	/* always set hidden position to this one */
++	hidden_file->f_pos = file->f_pos;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++	memcpy(&(hidden_file->f_ra), 
++	       &(file->f_ra), 
++	       sizeof(struct file_ra_state));
++#else
++	if (file->f_reada) { /* update readahead information if needed */
++		hidden_file->f_reada = file->f_reada;
++		hidden_file->f_ramax = file->f_ramax;
++		hidden_file->f_raend = file->f_raend;
++		hidden_file->f_ralen = file->f_ralen;
++		hidden_file->f_rawin = file->f_rawin;
++	}
++#endif
++	if (hidden_file->f_op && hidden_file->f_op->llseek)
++		err = hidden_file->f_op->llseek(hidden_file, offset, origin);
++	else
++		err = generic_file_llseek(hidden_file, offset, origin);
++
++	if (err < 0)
++		goto out;
++
++	if (err != file->f_pos) {
++		file->f_pos = err;
++		// ION maybe this?
++		// 	file->f_pos = hidden_file->f_pos;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++		file->f_reada = 0;
++#endif
++		file->f_version++;
++	}
++
++ out:
++	return err;
++}
++
++
++/* mk: fanout capable */
++STATIC ssize_t
++mini_fo_read(file_t *file, char *buf, size_t count, loff_t *ppos)
++{
++	int err = -EINVAL;
++	file_t *hidden_file = NULL;
++	loff_t pos = *ppos;
++
++	if(S_ISDIR(file->f_dentry->d_inode->i_mode)) {
++		/* Check if trying to read from a directory */
++		/* printk(KERN_CRIT "mini_fo_read: ERROR: trying to read data from a directory.\n"); */
++		err = -EISDIR;
++		goto out;
++	}
++
++	if (ftopd(file) != NULL) {
++		if(ftohf2(file)) {
++			hidden_file = ftohf2(file);
++		} else {
++			hidden_file = ftohf(file);
++		}
++	}
++
++	if (!hidden_file->f_op || !hidden_file->f_op->read)
++		goto out;
++
++	err = hidden_file->f_op->read(hidden_file, buf, count, &pos);
++	*ppos = pos;
++
++	if (err >= 0) {
++		/* atime should also be updated for reads of size zero or more */
++		fist_copy_attr_atime(file->f_dentry->d_inode,
++				     hidden_file->f_dentry->d_inode);
++	}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++	/*
++	 * MAJOR HACK
++	 * because pread() does not have any way to tell us that it is
++	 * our caller, we don't know for sure if we have to update
++	 * the file positions.  This hack relies on read() having passed us
++	 * the "real" pointer of its struct file's f_pos field.
++	 */
++	if (ppos == &file->f_pos)
++		hidden_file->f_pos = *ppos = pos;
++	if (hidden_file->f_reada) { /* update readahead information if needed */
++		file->f_reada = hidden_file->f_reada;
++		file->f_ramax = hidden_file->f_ramax;
++		file->f_raend = hidden_file->f_raend;
++		file->f_ralen = hidden_file->f_ralen;
++		file->f_rawin = hidden_file->f_rawin;
++	}
++#else
++	memcpy(&(file->f_ra),&(hidden_file->f_ra),sizeof(struct file_ra_state));
++#endif
++
++ out:
++	return err;
++}
++
++
++/* this mini_fo_write() does not modify data pages! */
++STATIC ssize_t
++mini_fo_write(file_t *file, const char *buf, size_t count, loff_t *ppos)
++{
++	int err = -EINVAL;
++	file_t *hidden_file = NULL;
++	inode_t *inode;
++	inode_t *hidden_inode;
++	loff_t pos = *ppos;
++
++	/* mk: fan out: */
++	if (ftopd(file) != NULL) {
++		if(ftohf2(file)) {
++			hidden_file = ftohf2(file);
++		} else {
++			/* This is bad! We have no storage file to write to. This
++			 * should never happen because if a file is opened for
++			 * writing, a copy should have been made earlier.
++			 */
++			printk(KERN_CRIT "mini_fo: write : ERROR, no storage file to write.\n");
++			err = -EINVAL;
++			goto out;
++		}
++	}
++
++	inode = file->f_dentry->d_inode;
++	hidden_inode = itohi2(inode);
++	if(!hidden_inode) {
++		printk(KERN_CRIT "mini_fo: write: no sto inode found, not good.\n");
++		goto out;
++	}
++
++	if (!hidden_file->f_op || !hidden_file->f_op->write)
++		goto out;
++
++	/* adjust for append -- seek to the end of the file */
++	if (file->f_flags & O_APPEND)
++		pos = inode->i_size;
++
++	err = hidden_file->f_op->write(hidden_file, buf, count, &pos);
++
++	/*
++	 * copy ctime and mtime from lower layer attributes
++	 * atime is unchanged for both layers
++	 */
++	if (err >= 0)
++		fist_copy_attr_times(inode, hidden_inode);
++	
++	*ppos = pos;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++	/*
++	 * XXX: MAJOR HACK
++	 *
++	 * because pwrite() does not have any way to tell us that it is
++	 * our caller, we don't know for sure if we have to update
++	 * the file positions.  This hack relies on write() having passed us
++	 * the "real" pointer of its struct file's f_pos field.
++	 */
++	if (ppos == &file->f_pos)
++		hidden_file->f_pos = *ppos = pos;
++#endif
++	/* update this inode's size */
++	if (pos > inode->i_size)
++		inode->i_size = pos;
++
++ out:
++	return err;
++}
++
++/* Global variable to hold a file_t pointer.
++ * It allows the mini_fo_filldir function to know which file is
++ * being read, which is required for two reasons:
++ *
++ *   - to be able to call wol functions in order to avoid listing
++ *     deleted base files.
++ *   - if we're reading a directory which is in state 1, we need to
++ *     maintain a list (in mini_fo_filldir) of which files have already
++ *     been copied to userspace, to detect files existing in both base
++ *     and storage and not list them twice.
++ */
++filldir_t mini_fo_filldir_orig;
++file_t *mini_fo_filldir_file;
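++/* Note: both pointers are module-global; mini_fo_readdir() points them
++ * at the current file and filldir callback before calling vfs_readdir()
++ * and clears mini_fo_filldir_file afterwards, so concurrent readdirs
++ * share this state. */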
++
++/* mainly copied from fs/readdir.c */
++STATIC int
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++mini_fo_filldir(void * __buf, const char * name, int namlen, loff_t offset,
++		  u64 ino, unsigned int d_type)
++#else
++mini_fo_filldir(void * __buf, const char * name, int namlen, loff_t offset,
++		  ino_t ino, unsigned int d_type)
++#endif
++{
++	struct getdents_callback * buf = (struct getdents_callback *) __buf;
++	file_t* file = mini_fo_filldir_file;
++
++	/* In these states we filter meta files in storage (WOL) */
++	if(file && (dtopd(file->f_dentry)->state == MODIFIED ||
++		    dtopd(file->f_dentry)->state == CREATED ||
++		    dtopd(file->f_dentry)->state == DEL_REWRITTEN)) {
++
++		int tmp = strlen(META_FILENAME);
++		if(tmp  == namlen) {
++			if(!strncmp(name, META_FILENAME, namlen))
++				return 0;
++		}
++	}
++
++	/* check if we are merging the contents of storage and base */
++	if(file && dtopd(file->f_dentry)->state == MODIFIED) {
++		/* check if we are still reading storage contents, if
++		 * yes, we just save the name of the file for duplicate
++		 * checking later. */
++
++		if(!ftopd(file)->rd.sto_done) {
++			/* put file into ndl list */
++			if(ndl_add_entry(&ftopd(file)->rd, name, namlen))
++				printk(KERN_CRIT "mini_fo_filldir: Error adding to ndl.\n");
++		} else {
++			/* check if file has been deleted */
++			if(meta_check_d_entry(file->f_dentry, name, namlen))
++				return 0;
++			
++			/* do duplicate checking */
++			if(ndl_check_entry(&ftopd(file)->rd, name, namlen))
++				return 0;
++		}
++	}
++
++	return mini_fo_filldir_orig(buf, name, namlen, offset, ino, d_type);
++}
++
++
++STATIC int
++mini_fo_readdir(file_t *file, void *dirent, filldir_t filldir)
++{
++	int err = 0;/* mk: ??? -ENOTDIR; */
++	file_t *hidden_file = NULL;
++	file_t *hidden_sto_file = NULL;
++	inode_t *inode;
++	struct getdents_callback *buf;
++	int oldcount;
++
++#if defined(FIST_FILTER_NAME) || defined(FIST_FILTER_SCA)
++	struct mini_fo_getdents_callback buf;
++#endif /* FIST_FILTER_NAME || FIST_FILTER_SCA */
++
++	buf = (struct getdents_callback *) dirent;
++	oldcount = buf->count;
++	inode = file->f_dentry->d_inode;
++	mini_fo_filldir_file = file;
++	mini_fo_filldir_orig = filldir;
++
++	ftopd(file)->rd.sto_done = 0;
++	do {
++		if (ftopd(file) != NULL) {
++			if(ftohf2(file)) { 
++				hidden_sto_file = ftohf2(file);
++				err = vfs_readdir(hidden_sto_file, mini_fo_filldir, dirent);
++				file->f_pos = hidden_sto_file->f_pos;
++				if (err > 0)
++					fist_copy_attr_atime(inode, hidden_sto_file->f_dentry->d_inode);
++				/* not finished yet, we'll be called again */
++				if (buf->count != oldcount)
++					break;
++			}
++
++			ftopd(file)->rd.sto_done = 1;
++
++			if(ftohf(file)) { 
++				hidden_file = ftohf(file);
++				err = vfs_readdir(hidden_file, mini_fo_filldir, dirent);
++				file->f_pos = hidden_file->f_pos;
++				if (err > 0)
++					fist_copy_attr_atime(inode, hidden_file->f_dentry->d_inode);
++			}
++
++		}
++	} while (0);
++
++	/* mk:
++	 * we need to check if all the directory data has been copied to userspace,
++	 * or if we will be called again by userspace to complete the operation.
++	 */
++	if(buf->count == oldcount) {
++		ndl_put_list(&ftopd(file)->rd);
++	}
++
++	/* unset this, safe */
++	mini_fo_filldir_file = NULL;
++	return err;
++}
++
++
++STATIC unsigned int
++mini_fo_poll(file_t *file, poll_table *wait)
++{
++	unsigned int mask = DEFAULT_POLLMASK;
++	file_t *hidden_file = NULL;
++
++	if (ftopd(file) != NULL) {
++		if(ftohf2(file)) {
++			hidden_file = ftohf2(file);
++		} else {
++			hidden_file = ftohf(file);
++		}
++	}
++
++	if (!hidden_file->f_op || !hidden_file->f_op->poll)
++		goto out;
++
++	mask = hidden_file->f_op->poll(hidden_file, wait);
++
++ out:
++	return mask;
++}
++
++/* FIST-LITE special version of mmap */
++STATIC int
++mini_fo_mmap(file_t *file, vm_area_t *vma)
++{
++	int err = 0;
++	file_t *hidden_file = NULL;
++
++	/* fanout capability */
++	if (ftopd(file) != NULL) {
++		if(ftohf2(file)) {
++			hidden_file = ftohf2(file);
++		} else {
++			hidden_file = ftohf(file);
++		}
++	}
++
++	ASSERT(hidden_file != NULL);
++	ASSERT(hidden_file->f_op != NULL);
++	ASSERT(hidden_file->f_op->mmap != NULL);
++
++	vma->vm_file = hidden_file;
++	err = hidden_file->f_op->mmap(hidden_file, vma);
++	get_file(hidden_file); /* make sure it doesn't get freed on us */
++	fput(file);	       /* no need to keep extra ref on ours */
++
++	return err;
++}
++
++
++
++STATIC int
++mini_fo_open(inode_t *inode, file_t *file)
++{
++	int err = 0;
++ 	int hidden_flags; 
++	file_t *hidden_file = NULL;
++	dentry_t *hidden_dentry = NULL;
++
++	/* fanout stuff */
++	file_t *hidden_sto_file = NULL;
++	dentry_t *hidden_sto_dentry = NULL;
++
++	__ftopd(file) = 
++		kmalloc(sizeof(struct mini_fo_file_info), GFP_KERNEL);
++	if (!ftopd(file)) {
++		err = -ENOMEM;
++		goto out;
++	}
++
++	/* init the readdir_helper structure */
++	INIT_LIST_HEAD(&ftopd(file)->rd.ndl_list);
++	ftopd(file)->rd.ndl_size = 0;
++
++	/* In certain paths this could stay uninitialized and cause trouble */
++	ftohf(file) = NULL;
++	ftohf2(file) = NULL;
++	hidden_flags = file->f_flags;
++
++	/* create storage files? */
++	if(dtost(file->f_dentry) == UNMODIFIED) {
++		if(!IS_WRITE_FLAG(file->f_flags)) {
++			hidden_dentry = dtohd(file->f_dentry);
++			dget(hidden_dentry);
++			/* dentry_open will decrement mnt refcnt if err.
++			 * otherwise fput() will do an mntput() for us upon file close. */
++			mntget(stopd(inode->i_sb)->hidden_mnt);
++			hidden_file = dentry_open(hidden_dentry,
++						  stopd(inode->i_sb)->hidden_mnt,
++						  hidden_flags);
++			if (IS_ERR(hidden_file)) {
++				err = PTR_ERR(hidden_file);
++				dput(hidden_dentry);
++				goto out;
++			}
++			ftohf(file) = hidden_file;	/* link two files */
++			goto out;
++		}
++		else {
++			if(S_ISDIR(file->f_dentry->d_inode->i_mode)) {
++				err = dir_unmod_to_mod(file->f_dentry);
++			} else
++				err = nondir_unmod_to_mod(file->f_dentry, 1);
++
++			if (err) {
++				printk("mini_fo_open: ERROR creating storage file.\n");
++				goto out;
++			}
++		}
++	}
++	hidden_sto_dentry = dtohd2(file->f_dentry);
++	dget(hidden_sto_dentry);
++
++	if(dtopd(file->f_dentry)->state == MODIFIED) {
++		/* Directories are special, interpose on both lower level files */
++		if(S_ISDIR(itohi(inode)->i_mode)) {
++			/* check for invalid file types of lower level files */
++			if(!(S_ISDIR(itohi(inode)->i_mode) && S_ISDIR(itohi2(inode)->i_mode))) {
++				printk(KERN_CRIT "mini_fo_open: meta data corruption detected.\n");
++				dput(hidden_sto_dentry);
++				err = -EINVAL;
++				goto out;
++			}
++
++			/* lower level directories are ok, open the base file */
++			hidden_dentry = dtohd(file->f_dentry);
++			dget(hidden_dentry);
++
++			mntget(stopd(inode->i_sb)->hidden_mnt);
++			hidden_file = dentry_open(hidden_dentry,
++						  stopd(inode->i_sb)->hidden_mnt,
++						  hidden_flags);
++			if (IS_ERR(hidden_file)) {
++				err = PTR_ERR(hidden_file);
++				dput(hidden_dentry);
++				dput(hidden_sto_dentry);
++				goto out;
++			}
++			ftohf(file) = hidden_file; /* link the two files */
++		}
++	}
++
++	if(!exists_in_storage(file->f_dentry)) {
++		printk(KERN_CRIT "mini_fo_open: invalid file state detected.\n");
++		err = -EINVAL;
++		dput(hidden_sto_dentry);
++
++		/* If the base file has been opened, we need to close it here */
++		if(ftohf(file)) {
++			if (hidden_file->f_op && hidden_file->f_op->flush)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++				hidden_file->f_op->flush(hidden_file, NULL);
++#else
++				hidden_file->f_op->flush(hidden_file);
++#endif
++			dput(hidden_dentry);
++		}
++		goto out;
++	}
++
++	/* ok, now we can safely open the storage file */
++	mntget(stopd(inode->i_sb)->hidden_mnt2);
++	hidden_sto_file = dentry_open(hidden_sto_dentry,
++				      stopd(inode->i_sb)->hidden_mnt2,
++				      hidden_flags);
++
++	/* dentry_open dputs the dentry if it fails */
++	if (IS_ERR(hidden_sto_file)) {
++		err = PTR_ERR(hidden_sto_file);
++		/* close base file if open */
++		if(ftohf(file)) {
++			if (hidden_file->f_op && hidden_file->f_op->flush)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++				hidden_file->f_op->flush(hidden_file, NULL);
++#else
++				hidden_file->f_op->flush(hidden_file);
++#endif
++			dput(hidden_dentry);
++		}
++		goto out;
++	}
++	ftohf2(file) = hidden_sto_file; /* link storage file */
++	
++ out:
++	if (err < 0 && ftopd(file)) {
++		kfree(ftopd(file));
++	}
++	return err;
++}
++
++STATIC int
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++mini_fo_flush(file_t *file, fl_owner_t id)
++#else
++mini_fo_flush(file_t *file)
++#endif
++{
++	int err1 = 0;		/* assume ok (see open.c:close_fp) */
++	int err2 = 0;
++	file_t *hidden_file = NULL;
++	
++	check_mini_fo_file(file);
++
++	/* mk: we don't do any state checking here, as it's not worth the time.
++	 * Just flush the lower level files if they exist.
++	 */
++	if(ftopd(file) != NULL) {
++		if(ftohf(file) != NULL) {
++			hidden_file = ftohf(file);
++			if (hidden_file->f_op && hidden_file->f_op->flush)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++				err1 = hidden_file->f_op->flush(hidden_file, id);
++#else
++				err1 = hidden_file->f_op->flush(hidden_file);
++#endif
++		}
++		if(ftohf2(file) != NULL) {
++			hidden_file = ftohf2(file);
++			if (hidden_file->f_op && hidden_file->f_op->flush)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++				err2 = hidden_file->f_op->flush(hidden_file, id);
++#else
++				err2 = hidden_file->f_op->flush(hidden_file);
++#endif
++		}
++	}
++	return (err1 | err2);
++}
++
++
++STATIC int
++mini_fo_release(inode_t *inode, file_t *file)
++{
++	int err = 0;
++	file_t *hidden_file = NULL;
++
++	if (ftopd(file) != NULL) {
++		if(ftohf(file)) {
++			hidden_file = ftohf(file);
++			fput(hidden_file);
++		}
++		if(ftohf2(file)) {
++			hidden_file = ftohf2(file);
++			fput(hidden_file);
++		}
++		kfree(ftopd(file));
++	}
++	return err;
++}
++
++STATIC int
++mini_fo_fsync(file_t *file, dentry_t *dentry, int datasync)
++{
++	int err1 = 0;
++	int err2 = 0;
++	file_t *hidden_file = NULL;
++	dentry_t *hidden_dentry;
++
++	check_mini_fo_file(file);
++
++	if ((hidden_file = ftohf(file)) != NULL) {
++		hidden_dentry = dtohd(dentry);
++		if (hidden_file->f_op && hidden_file->f_op->fsync) {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++			mutex_lock(&hidden_dentry->d_inode->i_mutex);
++#else
++			down(&hidden_dentry->d_inode->i_sem);
++#endif
++			err1 = hidden_file->f_op->fsync(hidden_file, hidden_dentry, datasync);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++			mutex_unlock(&hidden_dentry->d_inode->i_mutex);
++#else
++			up(&hidden_dentry->d_inode->i_sem);
++#endif
++		}
++	}
++
++	if ((hidden_file = ftohf2(file)) != NULL) {
++		hidden_dentry = dtohd2(dentry);
++		if (hidden_file->f_op && hidden_file->f_op->fsync) {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++			mutex_lock(&hidden_dentry->d_inode->i_mutex);
++#else
++			down(&hidden_dentry->d_inode->i_sem);
++#endif
++			err2 = hidden_file->f_op->fsync(hidden_file, hidden_dentry, datasync);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++			mutex_unlock(&hidden_dentry->d_inode->i_mutex);
++#else
++			up(&hidden_dentry->d_inode->i_sem);
++#endif
++		}
++	}
++
++	return (err1 || err2);
++}
++
++
++STATIC int
++mini_fo_fasync(int fd, file_t *file, int flag)
++{
++	int err1 = 0;
++	int err2 = 0;
++
++	file_t *hidden_file = NULL;
++
++	check_mini_fo_file(file);
++
++	if((hidden_file = ftohf(file)) != NULL) {
++		err1 = hidden_file->f_op->fasync(fd, hidden_file, flag);
++	}
++	if((hidden_file = ftohf2(file)) != NULL) {
++		err2 = hidden_file->f_op->fasync(fd, hidden_file, flag);
++	}
++	
++	return (err1 || err2);
++}
++
++
++
++struct file_operations mini_fo_dir_fops =
++	{
++		read:	generic_read_dir,
++		write:	mini_fo_write,
++		readdir: mini_fo_readdir,
++		poll:	mini_fo_poll,
++		/* ioctl:	mini_fo_ioctl, */
++		mmap:	mini_fo_mmap,
++		open:	mini_fo_open,
++		flush:	mini_fo_flush,
++		release: mini_fo_release,
++		fsync:	mini_fo_fsync,
++		fasync:	mini_fo_fasync,
++		/* not needed lock:	mini_fo_lock, */
++		/* not needed: readv */
++		/* not needed: writev */
++		/* not implemented: sendpage */
++		/* not implemented: get_unmapped_area */
++	};
++
++struct file_operations mini_fo_main_fops =
++	{
++		llseek:	mini_fo_llseek,
++		read:	mini_fo_read,
++		write:	mini_fo_write,
++		readdir: mini_fo_readdir,
++		poll:	mini_fo_poll,
++		/* ioctl:	mini_fo_ioctl, */
++		mmap:	mini_fo_mmap,
++		open:	mini_fo_open,
++		flush:	mini_fo_flush,
++		release: mini_fo_release,
++		fsync:	mini_fo_fsync,
++		fasync:	mini_fo_fasync,
++		/* not needed: lock:	mini_fo_lock, */
++		/* not needed: readv */
++		/* not needed: writev */
++		/* not implemented: sendpage */
++		/* not implemented: get_unmapped_area */
++	};
+diff -urN linux-2.6.21.1.old/fs/mini_fo/fist.h linux-2.6.21.1.dev/fs/mini_fo/fist.h
+--- linux-2.6.21.1.old/fs/mini_fo/fist.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/fist.h	2007-05-26 21:01:26.162330024 +0200
+@@ -0,0 +1,252 @@
++/*
++ * Copyright (c) 1997-2003 Erez Zadok
++ * Copyright (c) 2001-2003 Stony Brook University
++ *
++ * For specific licensing information, see the COPYING file distributed with
++ * this package, or get one from ftp://ftp.filesystems.org/pub/fist/COPYING.
++ *
++ * This Copyright notice must be kept intact and distributed with all
++ * fistgen sources INCLUDING sources generated by fistgen.
++ */
++/*
++ * Copyright (C) 2004, 2005 Markus Klotzbuecher <mk@creamnet.de>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++
++/*
++ *  $Id$
++ */
++
++#ifndef __FIST_H_
++#define __FIST_H_
++
++/*
++ * KERNEL ONLY CODE:
++ */
++#ifdef __KERNEL__
++#include <linux/version.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
++#include <linux/autoconf.h>
++#else
++#include <linux/config.h>
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#ifdef CONFIG_MODVERSIONS
++# define MODVERSIONS
++# include <linux/modversions.h>
++#endif /* CONFIG_MODVERSIONS */
++#endif /* KERNEL_VERSION < 2.6.0 */
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/stat.h>
++#include <linux/errno.h>
++#include <linux/wait.h>
++#include <linux/limits.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#include <linux/locks.h>
++#else
++#include <linux/buffer_head.h>
++#include <linux/pagemap.h>
++#include <linux/namei.h>
++#include <linux/module.h>
++#include <linux/mount.h>
++#include <linux/page-flags.h>
++#include <linux/writeback.h>
++#include <linux/statfs.h>
++#endif
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/file.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/poll.h>
++#include <linux/list.h>
++#include <linux/init.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20)
++#include <linux/xattr.h>
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++#include <linux/security.h>
++#endif
++
++#include <linux/swap.h>
++
++#include <asm/system.h>
++/* #include <asm/segment.h> */
++#include <asm/mman.h>
++#include <linux/seq_file.h>
++
++/*
++ * MACROS:
++ */
++
++/* those mapped to ATTR_* were copied from linux/fs.h */
++#define FA_MODE		ATTR_MODE
++#define FA_UID		ATTR_UID
++#define FA_GID		ATTR_GID
++#define FA_SIZE		ATTR_SIZE
++#define FA_ATIME	ATTR_ATIME
++#define FA_MTIME	ATTR_MTIME
++#define FA_CTIME	ATTR_CTIME
++#define FA_ATIME_SET	ATTR_ATIME_SET
++#define FA_MTIME_SET	ATTR_MTIME_SET
++#define FA_FORCE	ATTR_FORCE
++#define FA_ATTR_FLAGS	ATTR_ATTR_FLAG
++
++/* must be greater than all other ATTR_* flags! */
++#define FA_NLINK	2048
++#define FA_BLKSIZE	4096
++#define FA_BLOCKS	8192
++#define FA_TIMES	(FA_ATIME|FA_MTIME|FA_CTIME)
++#define FA_ALL		0
++
++/* macros to manage changes between kernels */
++#define INODE_DATA(i)	(&(i)->i_data)
++
++#define MIN(x,y) ((x < y) ? (x) : (y))
++#define MAX(x,y) ((x > y) ? (x) : (y))
++#define MAXPATHLEN PATH_MAX
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5)
++# define lookup_one_len(a,b,c) lookup_one(a,b)
++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) */
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8)
++# define generic_file_llseek default_llseek
++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) */
++
++#ifndef SEEK_SET
++# define SEEK_SET 0
++#endif /* not SEEK_SET */
++
++#ifndef SEEK_CUR
++# define SEEK_CUR 1
++#endif /* not SEEK_CUR */
++
++#ifndef SEEK_END
++# define SEEK_END 2
++#endif /* not SEEK_END */
++
++#ifndef DEFAULT_POLLMASK
++# define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
++#endif /* not DEFAULT_POLLMASK */
++
++/* XXX: fix this so fistgen generates kfree() code directly */
++#define kfree_s(a,b) kfree(a)
++
++/*
++ * TYPEDEFS:
++ */
++typedef struct dentry dentry_t;
++typedef struct file file_t;
++typedef struct inode inode_t;
++typedef inode_t vnode_t;
++typedef struct page page_t;
++typedef struct qstr qstr_t;
++typedef struct super_block super_block_t;
++typedef super_block_t vfs_t;
++typedef struct vm_area_struct vm_area_t;
++
++
++/*
++ * EXTERNALS:
++ */
++
++#define FPPF(str,page) printk("PPF %s 0x%x/%d: Lck:%d Err:%d Ref:%d Upd:%d Other::%d:%d:%d:%d:\n", \
++		str, \
++		(int) page, \
++		(int) page->index, \
++		(PageLocked(page) ? 1 : 0), \
++		(PageError(page) ? 1 : 0), \
++		(PageReferenced(page) ? 1 : 0), \
++		(Page_Uptodate(page) ? 1 : 0), \
++		(PageDecrAfter(page) ? 1 : 0), \
++		(PageSlab(page) ? 1 : 0), \
++		(PageSwapCache(page) ? 1 : 0), \
++		(PageReserved(page) ? 1 : 0) \
++		)
++#define EZKDBG printk("EZK %s:%d:%s\n",__FILE__,__LINE__,__FUNCTION__)
++#if 0
++# define EZKDBG1 printk("EZK %s:%d\n",__FILE__,__LINE__)
++#else
++# define EZKDBG1
++#endif
++
++extern int fist_get_debug_value(void);
++extern int fist_set_debug_value(int val);
++#if 0 /* mini_fo doesn't need these */
++extern void fist_dprint_internal(int level, char *str,...);
++extern void fist_print_dentry(char *str, const dentry_t *dentry);
++extern void fist_print_inode(char *str, const inode_t *inode);
++extern void fist_print_file(char *str, const file_t *file);
++extern void fist_print_buffer_flags(char *str, struct buffer_head *buffer);
++extern void fist_print_page_flags(char *str, page_t *page);
++extern void fist_print_page_bytes(char *str, page_t *page);
++extern void fist_print_pte_flags(char *str, const page_t *page);
++extern void fist_checkinode(inode_t *inode, char *msg);
++extern void fist_print_sb(char *str, const super_block_t *sb);
++
++/* §$% by mk: special debug functions */
++extern void fist_mk_print_dentry(char *str, const dentry_t *dentry);
++extern void fist_mk_print_inode(char *str, const inode_t *inode);
++
++extern char *add_indent(void);
++extern char *del_indent(void);
++#endif/* mini_fo doesn't need these */
++
++
++#define STATIC
++#define ASSERT(EX)	\
++do {	\
++    if (!(EX)) {	\
++	printk(KERN_CRIT "ASSERTION FAILED: %s at %s:%d (%s)\n", #EX,	\
++	       __FILE__, __LINE__, __FUNCTION__);	\
++	(*((char *)0))=0;	\
++    }	\
++} while (0)
++/* same ASSERT, but tell me who was the caller of the function */
++#define ASSERT2(EX)	\
++do {	\
++    if (!(EX)) {	\
++	printk(KERN_CRIT "ASSERTION FAILED (caller): %s at %s:%d (%s)\n", #EX,	\
++	       file, line, func);	\
++	(*((char *)0))=0;	\
++    }	\
++} while (0)
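++/* Note: on a failed assertion both macros above deliberately write
++ * through a NULL pointer to force an immediate oops. */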
++
++#if 0 /* mini_fo doesn't need these */
++#define dprintk(format, args...) printk(KERN_DEBUG format, ##args)
++#define fist_dprint(level, str, args...) fist_dprint_internal(level, KERN_DEBUG str, ## args)
++#define print_entry_location() fist_dprint(4, "%sIN:  %s %s:%d\n", add_indent(), __FUNCTION__, __FILE__, __LINE__)
++#define print_exit_location() fist_dprint(4, "%s OUT: %s %s:%d\n", del_indent(), __FUNCTION__, __FILE__, __LINE__)
++#define print_exit_status(status) fist_dprint(4, "%s OUT: %s %s:%d, STATUS: %d\n", del_indent(), __FUNCTION__, __FILE__, __LINE__, status)
++#define print_exit_pointer(status) \
++do { \
++  if (IS_ERR(status)) \
++    fist_dprint(4, "%s OUT: %s %s:%d, RESULT: %ld\n", del_indent(), __FUNCTION__, __FILE__, __LINE__, PTR_ERR(status)); \
++  else \
++    fist_dprint(4, "%s OUT: %s %s:%d, RESULT: 0x%x\n", del_indent(), __FUNCTION__, __FILE__, __LINE__, PTR_ERR(status)); \
++} while (0)
++#endif/* mini_fo doesn't need these */
++
++#endif /* __KERNEL__ */
++
++
++/*
++ * DEFINITIONS FOR USER AND KERNEL CODE:
++ * (Note: ioctl numbers 1--9 are reserved for fistgen, the rest
++ *  are auto-generated based on the user's .fist file.)
++ */
++# define FIST_IOCTL_GET_DEBUG_VALUE	_IOR(0x15, 1, int)
++# define FIST_IOCTL_SET_DEBUG_VALUE	_IOW(0x15, 2, int)
++
++#endif /* not __FIST_H_ */
+diff -urN linux-2.6.21.1.old/fs/mini_fo/inode.c linux-2.6.21.1.dev/fs/mini_fo/inode.c
+--- linux-2.6.21.1.old/fs/mini_fo/inode.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/inode.c	2007-05-26 21:01:26.164329720 +0200
+@@ -0,0 +1,1573 @@
++/*
++ * Copyright (c) 1997-2003 Erez Zadok
++ * Copyright (c) 2001-2003 Stony Brook University
++ *
++ * For specific licensing information, see the COPYING file distributed with
++ * this package, or get one from ftp://ftp.filesystems.org/pub/fist/COPYING.
++ *
++ * This Copyright notice must be kept intact and distributed with all
++ * fistgen sources INCLUDING sources generated by fistgen.
++ */
++/*
++ * Copyright (C) 2004, 2005 Markus Klotzbuecher <mk@creamnet.de>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++/*
++ *  $Id$
++ */
++
++#ifdef HAVE_CONFIG_H
++# include <config.h>
++#endif 
++
++#include "fist.h"
++#include "mini_fo.h"
++
++STATIC int
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++mini_fo_create(inode_t *dir, dentry_t *dentry, int mode, struct nameidata *nd)
++#else
++mini_fo_create(inode_t *dir, dentry_t *dentry, int mode)
++#endif
++{
++	int err = 0;
++
++	check_mini_fo_dentry(dentry);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++	err = create_sto_reg_file(dentry, mode, nd);
++#else
++	err = create_sto_reg_file(dentry, mode);
++#endif
++	check_mini_fo_dentry(dentry);
++	return err;
++}
++
++
++STATIC dentry_t *
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++mini_fo_lookup(inode_t *dir, dentry_t *dentry, struct nameidata* nd)
++#else
++mini_fo_lookup(inode_t *dir, dentry_t *dentry)
++#endif
++{
++	int err = 0;
++	dentry_t *hidden_dir_dentry;
++	dentry_t *hidden_dentry = NULL;
++
++	dentry_t *hidden_sto_dir_dentry;
++	dentry_t *hidden_sto_dentry = NULL;
++
++	/* whiteout flag */
++	int del_flag = 0; 
++	char *bpath = NULL;
++
++	const char *name;
++	unsigned int namelen;
++
++	/* Don't allow lookups of META-files */
++	namelen = strlen(META_FILENAME);
++	if(namelen == dentry->d_name.len) {
++		if(!strncmp(dentry->d_name.name, META_FILENAME, namelen)) {
++			err = -ENOENT;
++			goto out;
++		}
++	}
++
++	hidden_dir_dentry = dtohd(dentry->d_parent);
++	hidden_sto_dir_dentry = dtohd2(dentry->d_parent);
++
++	name = dentry->d_name.name;
++	namelen = dentry->d_name.len;
++
++	/* must initialize dentry operations */
++	dentry->d_op = &mini_fo_dops;
++
++	/* setup the del_flag */
++	del_flag = __meta_check_d_entry(dir, name, namelen);
++	bpath = __meta_check_r_entry(dir, name, namelen);
++
++	/* perform the lookups of base and storage files:
++	 *
++	 * This caused some serious trouble, as calling lookup_one_len on a
++	 * negative parent dentry oopses. The solution is to only do the lookup
++	 * if the parent dentry is positive, and set the result to NULL otherwise.
++	 * More trouble, who said a *_dir_dentry can't be NULL?
++	 */
++	if(bpath) {
++		/* Cross-Interposing (C), yeah! */
++		hidden_dentry = bpath_walk(dir->i_sb, bpath);
++		if(!hidden_dentry || !hidden_dentry->d_inode) {
++			printk(KERN_CRIT "mini_fo_lookup: bpath_walk failed.\n");
++			err= -EINVAL;
++			goto out;
++		}
++		
++		/* this can be set up safely without fear of spaghetti
++		 * interposing as it is only used for copying times */
++		hidden_dir_dentry = hidden_dentry->d_parent;
++		kfree(bpath);
++	}
++	else if(hidden_dir_dentry && hidden_dir_dentry->d_inode)
++		hidden_dentry = 
++			lookup_one_len(name, hidden_dir_dentry, namelen);
++	else
++		hidden_dentry = NULL;
++
++	if(hidden_sto_dir_dentry && hidden_sto_dir_dentry->d_inode)
++		hidden_sto_dentry = 
++			lookup_one_len(name, hidden_sto_dir_dentry, namelen);
++	else
++		hidden_sto_dentry =  NULL;
++
++	/* catch error in lookup */
++	if (IS_ERR(hidden_dentry) || IS_ERR(hidden_sto_dentry)) {
++		/* mk: we need to dput the dentry whose lookup_one_len
++		 * succeeded (the other one is an ERR_PTR), in order to
++		 * avoid unmount trouble.
++		 */
++		if(IS_ERR(hidden_dentry)) {
++			printk(KERN_CRIT "mini_fo_lookup: ERR from base dentry, lookup failed.\n");
++			err = PTR_ERR(hidden_dentry);
++		} else {
++			dput(hidden_dentry);
++		}
++		if(IS_ERR(hidden_sto_dentry)) {
++			printk(KERN_CRIT "mini_fo_lookup: ERR from storage dentry, lookup failed.\n");
++			err = PTR_ERR(hidden_sto_dentry);
++		} else {
++			dput(hidden_sto_dentry);
++		}
++		goto out;
++	}
++
++	/* allocate dentry private data */
++	__dtopd(dentry) = (struct mini_fo_dentry_info *)
++		kmalloc(sizeof(struct mini_fo_dentry_info), GFP_KERNEL);
++	
++	if (!dtopd(dentry)) {
++		err = -ENOMEM;
++		goto out_dput;
++	}
++
++	/* check for different states of the mini_fo file to be looked up. */
++	
++	/* state 1, file has been modified */
++	if(hidden_dentry && hidden_sto_dentry &&
++	   hidden_dentry->d_inode && hidden_sto_dentry->d_inode && !del_flag) {
++
++		/* update parent directory's atime */
++		fist_copy_attr_atime(dir, hidden_sto_dir_dentry->d_inode);
++
++		dtopd(dentry)->state = MODIFIED;
++		dtohd(dentry) = hidden_dentry;
++		dtohd2(dentry) = hidden_sto_dentry;
++
++		err = mini_fo_tri_interpose(hidden_dentry,
++					    hidden_sto_dentry,
++					    dentry, dir->i_sb, 1);
++		if (err) {
++			printk(KERN_CRIT "mini_fo_lookup: error interposing (state1).\n");
++			goto out_free;
++		}
++		goto out;
++	}
++	/* state 2, file is unmodified */
++	if(hidden_dentry && hidden_dentry->d_inode && !del_flag) {
++
++		fist_copy_attr_atime(dir, hidden_dir_dentry->d_inode);
++
++		dtopd(dentry)->state = UNMODIFIED;
++		dtohd(dentry) = hidden_dentry;
++		dtohd2(dentry) = hidden_sto_dentry; /* could be negative */
++
++		err = mini_fo_tri_interpose(hidden_dentry,
++					    hidden_sto_dentry,
++					    dentry, dir->i_sb, 1);
++		if (err) {
++			printk(KERN_CRIT "mini_fo_lookup: error interposing (state2).\n");
++			goto out_free;
++		}
++		goto out;
++	}
++	/* state 3, file has been newly created */
++	if(hidden_sto_dentry && hidden_sto_dentry->d_inode && !del_flag) {
++
++		fist_copy_attr_atime(dir, hidden_sto_dir_dentry->d_inode);
++		dtopd(dentry)->state = CREATED;
++		dtohd(dentry) = hidden_dentry; /* could be negative */
++		dtohd2(dentry) = hidden_sto_dentry;
++
++		err = mini_fo_tri_interpose(hidden_dentry,
++					    hidden_sto_dentry,
++					    dentry, dir->i_sb, 1);
++		if (err) {
++			printk(KERN_CRIT "mini_fo_lookup: error interposing (state3).\n");
++			goto out_free;
++		}
++		goto out;
++	}
++
++	/* state 4, file has been deleted and created again. */
++	if(hidden_dentry && hidden_sto_dentry &&
++	   hidden_dentry->d_inode && 
++	   hidden_sto_dentry->d_inode && del_flag) {
++
++		fist_copy_attr_atime(dir, hidden_sto_dir_dentry->d_inode);
++		dtopd(dentry)->state = DEL_REWRITTEN;
++		dtohd(dentry) = NULL;
++		dtohd2(dentry) = hidden_sto_dentry;
++
++		err = mini_fo_tri_interpose(NULL,
++					    hidden_sto_dentry,
++					    dentry, dir->i_sb, 1);
++		if (err) {
++			printk(KERN_CRIT "mini_fo_lookup: error interposing (state4).\n");
++			goto out_free;
++		}
++		/* We will never need this dentry again, as the file has been
++		 * deleted from base */
++		dput(hidden_dentry);
++		goto out;
++	}
++	/* state 5, file has been deleted in base */
++	if(hidden_dentry && hidden_sto_dentry &&
++	   hidden_dentry->d_inode && 
++	   !hidden_sto_dentry->d_inode && del_flag) {
++
++		/* check which parents atime we need for updating */
++		if(hidden_sto_dir_dentry->d_inode)
++			fist_copy_attr_atime(dir, 
++					     hidden_sto_dir_dentry->d_inode);
++		else
++			fist_copy_attr_atime(dir, 
++					     hidden_dir_dentry->d_inode);
++
++		dtopd(dentry)->state = DELETED;
++		dtohd(dentry) = NULL;
++		dtohd2(dentry) = hidden_sto_dentry;
++
++		/* add negative dentry to dcache to speed up lookups */
++		d_add(dentry, NULL);
++		dput(hidden_dentry);
++		goto out;
++	}
++	/* state 6, file does not exist */
++	if(((hidden_dentry && !hidden_dentry->d_inode) ||
++	    (hidden_sto_dentry && !hidden_sto_dentry->d_inode)) && !del_flag)
++		{
++			/* check which parents atime we need for updating */
++			if(hidden_sto_dir_dentry && hidden_sto_dir_dentry->d_inode)
++				fist_copy_attr_atime(dir, hidden_sto_dir_dentry->d_inode);
++			else
++				fist_copy_attr_atime(dir, hidden_dir_dentry->d_inode);
++
++			dtopd(dentry)->state = NON_EXISTANT;
++			dtohd(dentry) = hidden_dentry;
++			dtohd2(dentry) = hidden_sto_dentry;
++			d_add(dentry, NULL);
++			goto out;
++		}
++
++	/* if we get to here, we're in an invalid state. bad. */
++	printk(KERN_CRIT "mini_fo_lookup: ERROR, meta data corruption detected.\n");
++
++	/* end state checking */
++ out_free:
++	d_drop(dentry);		/* so that our bad dentry will get destroyed */
++	kfree(dtopd(dentry));
++	__dtopd(dentry) = NULL;	/* be safe */
++
++ out_dput:
++	if(hidden_dentry)
++		dput(hidden_dentry);
++	if(hidden_sto_dentry)
++		dput(hidden_sto_dentry); /* drops usage count and marks for release */
++
++ out:
++	/* initialize wol if the file exists and is a directory */
++	if(dentry->d_inode) {
++		if(S_ISDIR(dentry->d_inode->i_mode)) {
++			itopd(dentry->d_inode)->deleted_list_size = -1;
++			itopd(dentry->d_inode)->renamed_list_size = -1;
++			meta_build_lists(dentry);
++		}
++	}
++	return ERR_PTR(err);
++}
++
++
++STATIC int
++mini_fo_link(dentry_t *old_dentry, inode_t *dir, dentry_t *new_dentry)
++{
++	int err;
++	dentry_t *hidden_old_dentry;
++	dentry_t *hidden_new_dentry;
++	dentry_t *hidden_dir_dentry;
++
++
++	check_mini_fo_dentry(old_dentry);
++	check_mini_fo_dentry(new_dentry);
++	check_mini_fo_inode(dir);
++
++	/* no links to directories or to existing targets allowed */
++	if(S_ISDIR(old_dentry->d_inode->i_mode) ||
++	   is_mini_fo_existant(new_dentry)) {
++		err = -EPERM;
++		goto out;
++	}
++
++	/* bring it directly from unmod to del_rew */
++	if(dtost(old_dentry) == UNMODIFIED) {
++		err = nondir_unmod_to_mod(old_dentry, 1);
++		if(err) {
++			err = -EINVAL;
++			goto out;
++		}
++		err = meta_add_d_entry(old_dentry->d_parent,
++				       old_dentry->d_name.name,
++				       old_dentry->d_name.len);
++		if(err) {
++			err = -EINVAL;
++			goto out;
++		}
++		dput(dtohd(old_dentry));
++		dtohd(old_dentry) = NULL;
++		dtost(old_dentry) = DEL_REWRITTEN;
++	}
++	
++	err = get_neg_sto_dentry(new_dentry);
++	if(err) {
++		err = -EINVAL;
++		goto out;
++	}
++
++	hidden_old_dentry = dtohd2(old_dentry);
++	hidden_new_dentry = dtohd2(new_dentry);
++
++	dget(hidden_old_dentry);
++	dget(hidden_new_dentry);
++
++	/* was: hidden_dir_dentry = lock_parent(hidden_new_dentry); */
++	hidden_dir_dentry = dget(hidden_new_dentry->d_parent);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_lock(&hidden_dir_dentry->d_inode->i_mutex);
++#else
++	down(&hidden_dir_dentry->d_inode->i_sem);
++#endif
++
++	err = vfs_link(hidden_old_dentry,
++		       hidden_dir_dentry->d_inode,
++		       hidden_new_dentry);
++	if (err || !hidden_new_dentry->d_inode)
++		goto out_lock;
++
++	dtost(new_dentry) = CREATED;
++ 	err = mini_fo_tri_interpose(NULL, hidden_new_dentry, new_dentry, dir->i_sb, 0);
++	if (err)
++		goto out_lock;
++
++	fist_copy_attr_timesizes(dir, hidden_new_dentry->d_inode);
++	/* propagate number of hard-links */
++	old_dentry->d_inode->i_nlink = itohi2(old_dentry->d_inode)->i_nlink;
++
++ out_lock:
++	/* was: unlock_dir(hidden_dir_dentry); */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_unlock(&hidden_dir_dentry->d_inode->i_mutex);
++#else
++	up(&hidden_dir_dentry->d_inode->i_sem);
++#endif
++	dput(hidden_dir_dentry);
++
++	dput(hidden_new_dentry);
++	dput(hidden_old_dentry);
++	if (!new_dentry->d_inode)
++		d_drop(new_dentry);
++
++ out:
++	return err;
++}
++
++
++STATIC int
++mini_fo_unlink(inode_t *dir, dentry_t *dentry)
++{
++	int err = 0;
++
++	dget(dentry);
++	if(dtopd(dentry)->state == MODIFIED) {
++		err = nondir_mod_to_del(dentry);
++		goto out;
++	}
++	else if(dtopd(dentry)->state == UNMODIFIED) {
++		err = nondir_unmod_to_del(dentry);
++		goto out;
++	}
++	else if(dtopd(dentry)->state == CREATED) {
++		err = nondir_creat_to_del(dentry);
++		goto out;
++	}
++	else if(dtopd(dentry)->state == DEL_REWRITTEN) {
++		err = nondir_del_rew_to_del(dentry);
++		goto out;
++	}
++
++	printk(KERN_CRIT "mini_fo_unlink: ERROR, invalid state detected.\n");
++
++ out:
++	fist_copy_attr_times(dir, itohi2(dentry->d_parent->d_inode));
++
++	if(!err) {
++		/* is this causing my pain? d_delete(dentry); */
++		d_drop(dentry);
++	}
++
++	dput(dentry);
++	return err;
++}
++
++
++STATIC int
++mini_fo_symlink(inode_t *dir, dentry_t *dentry, const char *symname)
++{
++	int err=0;
++	dentry_t *hidden_sto_dentry;
++	dentry_t *hidden_sto_dir_dentry;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++        umode_t mode;
++#endif
++
++	/* Fail if the symlink file exists */
++	if(!(dtost(dentry) == DELETED || 
++	     dtost(dentry) == NON_EXISTANT)) {
++		err = -EEXIST;
++		goto out;
++	}
++
++	err = get_neg_sto_dentry(dentry);
++	if(err) {
++		err = -EINVAL;
++		goto out;
++	}
++	hidden_sto_dentry = dtohd2(dentry);
++
++	dget(hidden_sto_dentry);
++	/* was: hidden_sto_dir_dentry = lock_parent(hidden_sto_dentry); */
++	hidden_sto_dir_dentry = dget(hidden_sto_dentry->d_parent);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_lock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++	down(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++	mode = S_IALLUGO;
++	err = vfs_symlink(hidden_sto_dir_dentry->d_inode,
++			  hidden_sto_dentry, symname, mode);
++#else
++	err = vfs_symlink(hidden_sto_dir_dentry->d_inode,
++			  hidden_sto_dentry,
++			  symname);
++#endif
++	if (err || !hidden_sto_dentry->d_inode)
++                goto out_lock;
++
++        if(dtost(dentry) == DELETED) {
++                dtost(dentry) = DEL_REWRITTEN;
++                err = mini_fo_tri_interpose(NULL, hidden_sto_dentry, dentry, dir->i_sb, 0);
++                if(err)
++                        goto out_lock;
++        } else if(dtost(dentry) == NON_EXISTANT) {
++                dtost(dentry) = CREATED;
++                err = mini_fo_tri_interpose(dtohd(dentry), hidden_sto_dentry, dentry, dir->i_sb, 0);
++                if(err)
++                        goto out_lock;
++        }
++	fist_copy_attr_timesizes(dir, hidden_sto_dir_dentry->d_inode);
++	
++ out_lock:
++        /* was: unlock_dir(hidden_sto_dir_dentry); */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_unlock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++	up(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++	dput(hidden_sto_dir_dentry);
++
++        dput(hidden_sto_dentry);
++        if (!dentry->d_inode)
++                d_drop(dentry);
++ out:
++        return err;
++}
++
++STATIC int
++mini_fo_mkdir(inode_t *dir, dentry_t *dentry, int mode)
++{
++	int err;
++
++	err = create_sto_dir(dentry, mode);
++
++	check_mini_fo_dentry(dentry);
++
++	return err;
++}
++
++
++STATIC int
++mini_fo_rmdir(inode_t *dir, dentry_t *dentry)
++{
++	int err = 0;
++	
++	dentry_t *hidden_sto_dentry;
++	dentry_t *hidden_sto_dir_dentry;
++	dentry_t *meta_dentry;
++	inode_t *hidden_sto_dir = NULL;
++
++	check_mini_fo_dentry(dentry);
++	check_mini_fo_inode(dir);
++
++	dget(dentry);
++	if(dtopd(dentry)->state == MODIFIED) {
++		/* XXX: disabled, because it does not bother to check files on
++		 * the original filesystem - just a hack, but better than simply
++		 * removing it without testing */
++		err = -EINVAL;
++		goto out;
++
++		hidden_sto_dir = itohi2(dir);
++		hidden_sto_dentry = dtohd2(dentry);
++
++		/* was:hidden_sto_dir_dentry = lock_parent(hidden_sto_dentry); */
++		hidden_sto_dir_dentry = dget(hidden_sto_dentry->d_parent);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_lock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++		down(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++
++		/* avoid destroying the hidden inode if the file is in use */
++		dget(hidden_sto_dentry);
++
++		/* Delete an old WOL file contained in the storage dir */
++		meta_dentry = lookup_one_len(META_FILENAME, 
++					     hidden_sto_dentry, 
++					     strlen(META_FILENAME));
++		if(meta_dentry->d_inode) {
++			err = vfs_unlink(hidden_sto_dentry->d_inode, meta_dentry);
++			dput(meta_dentry);
++			if(!err)
++				d_delete(meta_dentry);
++		}
++
++		err = vfs_rmdir(hidden_sto_dir, hidden_sto_dentry);
++		dput(hidden_sto_dentry);
++		if(!err)
++			d_delete(hidden_sto_dentry);
++
++		/* propagate number of hard-links */
++		dentry->d_inode->i_nlink = itohi2(dentry->d_inode)->i_nlink;
++
++		dput(dtohd(dentry));
++		
++		dtohd(dentry) = NULL;
++		dtopd(dentry)->state = DELETED;
++
++		/* careful with R files */
++		if( __meta_is_r_entry(dir, 
++				      dentry->d_name.name, 
++				      dentry->d_name.len) == 1) {
++			err = meta_remove_r_entry(dentry->d_parent, 
++						  dentry->d_name.name,
++						  dentry->d_name.len);
++			if(err) {
++				printk(KERN_CRIT "mini_fo: rmdir: meta_remove_r_entry failed.\n");
++				goto out;
++			}
++		}
++		else {
++			/* ok, add deleted file to META */		
++			meta_add_d_entry(dentry->d_parent, 
++					 dentry->d_name.name, 
++					 dentry->d_name.len);
++		}
++		/* was: unlock_dir(hidden_sto_dir_dentry); */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_unlock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++		up(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++		dput(hidden_sto_dir_dentry);
++		goto out;
++	}
++	else if(dtopd(dentry)->state == UNMODIFIED) {
++		/* XXX: simply adding it to the delete list here is fscking dangerous!
++		 * As a temporary hack, I will disable rmdir on unmodified directories
++		 * for now.
++		 */
++		err = -EINVAL;
++		goto out;
++
++		err = get_neg_sto_dentry(dentry);
++		if(err) {
++			err = -EINVAL;
++			goto out;
++		}
++		
++		/* dput base dentry, this will release the inode and free the
++		 * dentry, as we will never need it again. */
++		dput(dtohd(dentry));
++		dtohd(dentry) = NULL;
++		dtopd(dentry)->state = DELETED;
++
++		/* add deleted file to META-file */
++		meta_add_d_entry(dentry->d_parent, 
++				 dentry->d_name.name, 
++				 dentry->d_name.len);
++		goto out;
++	}
++	else if(dtopd(dentry)->state == CREATED) {
++		hidden_sto_dir = itohi2(dir);
++		hidden_sto_dentry = dtohd2(dentry);
++
++		/* was: hidden_sto_dir_dentry = lock_parent(hidden_sto_dentry);*/
++		hidden_sto_dir_dentry = dget(hidden_sto_dentry->d_parent);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_lock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++		down(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++
++		/* avoid destroying the hidden inode if the file is in use */
++		dget(hidden_sto_dentry);
++
++		/* Delete an old WOL file contained in the storage dir */
++		meta_dentry = lookup_one_len(META_FILENAME, 
++					     hidden_sto_dentry, 
++					     strlen(META_FILENAME));
++		if(meta_dentry->d_inode) {
++			/* is this necessary? dget(meta_dentry); */
++			err = vfs_unlink(hidden_sto_dentry->d_inode, 
++					 meta_dentry);
++			dput(meta_dentry);
++			if(!err)
++				d_delete(meta_dentry);
++		}
++
++		err = vfs_rmdir(hidden_sto_dir, hidden_sto_dentry);
++		dput(hidden_sto_dentry);
++		if(!err)
++			d_delete(hidden_sto_dentry);
++
++		/* propagate number of hard-links */
++		dentry->d_inode->i_nlink = itohi2(dentry->d_inode)->i_nlink;
++		dtopd(dentry)->state = NON_EXISTANT;
++
++		/* was: unlock_dir(hidden_sto_dir_dentry); */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_unlock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++		up(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++		dput(hidden_sto_dir_dentry);
++
++		goto out;
++	}
++	else if(dtopd(dentry)->state == DEL_REWRITTEN) {
++		hidden_sto_dir = itohi2(dir);
++		hidden_sto_dentry = dtohd2(dentry);
++
++		/* was: hidden_sto_dir_dentry = lock_parent(hidden_sto_dentry);*/
++		hidden_sto_dir_dentry = dget(hidden_sto_dentry->d_parent);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_lock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++		down(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++
++		/* avoid destroying the hidden inode if the file is in use */
++		dget(hidden_sto_dentry);
++
++		/* Delete an old WOL file contained in the storage dir */
++		meta_dentry = lookup_one_len(META_FILENAME, 
++					     hidden_sto_dentry, 
++					     strlen(META_FILENAME));
++		if(meta_dentry->d_inode) {
++			/* is this necessary? dget(meta_dentry); */
++			err = vfs_unlink(hidden_sto_dentry->d_inode,
++					 meta_dentry);
++			dput(meta_dentry);
++			if(!err)
++				d_delete(meta_dentry);
++		}
++
++		err = vfs_rmdir(hidden_sto_dir, hidden_sto_dentry);
++		dput(hidden_sto_dentry);
++		if(!err)
++			d_delete(hidden_sto_dentry);
++
++		/* propagate number of hard-links */
++		dentry->d_inode->i_nlink = itohi2(dentry->d_inode)->i_nlink;
++		dtopd(dentry)->state = DELETED;
++		/* was: unlock_dir(hidden_sto_dir_dentry); */
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_unlock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++		up(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++		dput(hidden_sto_dir_dentry);
++		goto out;
++	}
++
++	printk(KERN_CRIT "mini_fo_rmdir: ERROR, invalid state detected.\n");
++
++ out:
++	if(!err) {
++		d_drop(dentry);
++	}
++		
++	fist_copy_attr_times(dir, itohi2(dentry->d_parent->d_inode));
++	dput(dentry);
++
++	return err;
++}
++
++
++STATIC int
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++mini_fo_mknod(inode_t *dir, dentry_t *dentry, int mode, dev_t dev)
++#else
++mini_fo_mknod(inode_t *dir, dentry_t *dentry, int mode, int dev)
++#endif
++{
++	int err = 0;
++
++	check_mini_fo_dentry(dentry);
++
++	err = create_sto_nod(dentry, mode, dev);
++	if(err) {
++		printk(KERN_CRIT "mini_fo_mknod: creating sto nod failed.\n");
++		err = -EINVAL;
++	}
++	
++	check_mini_fo_dentry(dentry);
++	return err;
++}
++
++
++STATIC int
++mini_fo_rename(inode_t *old_dir, dentry_t *old_dentry,
++	       inode_t *new_dir, dentry_t *new_dentry)
++{
++	/* dispatch */
++	if(S_ISDIR(old_dentry->d_inode->i_mode))
++		return rename_directory(old_dir, old_dentry, new_dir, new_dentry);
++	return rename_nondir(old_dir, old_dentry, new_dir, new_dentry);
++	
++}
++
++int rename_directory(inode_t *old_dir, dentry_t *old_dentry,
++		     inode_t *new_dir, dentry_t *new_dentry)
++{
++	int err, bpath_len;
++	char *bpath;
++
++	dentry_t *hidden_old_dentry;
++	dentry_t *hidden_new_dentry;
++	dentry_t *hidden_old_dir_dentry;
++	dentry_t *hidden_new_dir_dentry;
++
++	err = 0;
++	bpath = NULL;
++	bpath_len = 0;
++
++	/* this is a test, chuck out if it works */
++	if(!(dtopd(new_dentry)->state == DELETED ||
++	     dtopd(new_dentry)->state == NON_EXISTANT)) {
++		printk(KERN_CRIT "mini_fo: rename_directory: \
++                                  uh, ah, new_dentry not negative.\n");
++		/* return -1; */
++	}
++	
++	/* state = UNMODIFIED */
++	if(dtopd(old_dentry)->state == UNMODIFIED) {
++		err = dir_unmod_to_mod(old_dentry);
++		if (err) 
++			goto out;
++	}
++
++	/* state = MODIFIED */
++	if(dtopd(old_dentry)->state == MODIFIED) {
++		bpath = meta_check_r_entry(old_dentry->d_parent, 
++					   old_dentry->d_name.name,
++					   old_dentry->d_name.len);
++		if(bpath) {
++			err = meta_remove_r_entry(old_dentry->d_parent,
++						  old_dentry->d_name.name,
++						  old_dentry->d_name.len);
++			if(err) {
++				printk(KERN_CRIT "mini_fo: rename_directory:\
++                                                   meta_remove_r_entry \
++                                                  failed.\n");
++				goto out;
++			}
++			err = meta_add_r_entry(new_dentry->d_parent,
++					       bpath,
++					       strlen(bpath),
++					       new_dentry->d_name.name,
++					       new_dentry->d_name.len);
++			kfree(bpath);
++		}
++		else {/* wol it */
++			err = meta_add_d_entry(old_dentry->d_parent, 
++					       old_dentry->d_name.name,
++					       old_dentry->d_name.len);
++			if (err) 
++				goto out;
++			/* put it on rename list */
++			err = get_mini_fo_bpath(old_dentry,
++						&bpath, 
++						&bpath_len);
++			if (err) 
++				goto out;
++			err = meta_add_r_entry(new_dentry->d_parent,
++					       bpath, bpath_len,
++					       new_dentry->d_name.name,
++					       new_dentry->d_name.len);
++			if (err) 
++				goto out;
++		}
++		/* no state change, MODIFIED stays MODIFIED */
++	}
++	/* state = CREATED */
++	if(dtopd(old_dentry)->state == CREATED ||
++	   dtopd(old_dentry)->state == DEL_REWRITTEN) {
++		if(dtohd(old_dentry))
++			dput(dtohd(old_dentry));
++		
++		if(dtopd(new_dentry)->state == DELETED) {
++			dtopd(old_dentry)->state = DEL_REWRITTEN;
++			dtohd(old_dentry) = NULL;
++		} 
++		else if(dtopd(new_dentry)->state == NON_EXISTANT) {
++			dtopd(old_dentry)->state = CREATED;
++			/* steal new dentry's neg. base dentry */
++			dtohd(old_dentry) = dtohd(new_dentry);
++			dtohd(new_dentry) = NULL;
++		}
++	}		
++	if(dtopd(new_dentry)->state == UNMODIFIED ||
++	   dtopd(new_dentry)->state == NON_EXISTANT) {
++		err = get_neg_sto_dentry(new_dentry);
++		if(err)
++			goto out;
++	}
++			
++	/* now move sto file */
++	hidden_old_dentry = dtohd2(old_dentry);
++	hidden_new_dentry = dtohd2(new_dentry);
++	
++	dget(hidden_old_dentry);
++	dget(hidden_new_dentry);
++	
++	hidden_old_dir_dentry = dget(hidden_old_dentry->d_parent);
++	hidden_new_dir_dentry = dget(hidden_new_dentry->d_parent);
++	double_lock(hidden_old_dir_dentry, hidden_new_dir_dentry);
++	
++	err = vfs_rename(hidden_old_dir_dentry->d_inode, hidden_old_dentry,
++			 hidden_new_dir_dentry->d_inode, hidden_new_dentry);
++	if(err)
++		goto out_lock;
++	
++	fist_copy_attr_all(new_dir, hidden_new_dir_dentry->d_inode);
++	if (new_dir != old_dir)
++		fist_copy_attr_all(old_dir, 
++				   hidden_old_dir_dentry->d_inode);
++	
++ out_lock:
++	/* double_unlock will dput the new/old parent dentries
++	 * whose refcnts were incremented via get_parent above. */
++	double_unlock(hidden_old_dir_dentry, hidden_new_dir_dentry);
++	dput(hidden_new_dentry);
++	dput(hidden_old_dentry);
++	
++ out:
++	return err;
++}
++
++int rename_nondir(inode_t *old_dir, dentry_t *old_dentry,
++		  inode_t *new_dir, dentry_t *new_dentry)
++{
++	int err=0;
++
++	check_mini_fo_dentry(old_dentry);
++	check_mini_fo_dentry(new_dentry);
++	check_mini_fo_inode(old_dir);
++	check_mini_fo_inode(new_dir);
++
++	/* state: UNMODIFIED */
++	if(dtost(old_dentry) == UNMODIFIED) {
++		err = nondir_unmod_to_mod(old_dentry, 1);
++		if(err) {
++			err = -EINVAL;
++			goto out;
++		}
++	}
++
++	/* the easy states */
++	if(exists_in_storage(old_dentry)) {
++		
++		dentry_t *hidden_old_dentry;
++		dentry_t *hidden_new_dentry;
++		dentry_t *hidden_old_dir_dentry;
++		dentry_t *hidden_new_dir_dentry;
++
++		/* if old file is MODIFIED, add it to the deleted_list */
++		if(dtopd(old_dentry)->state == MODIFIED) {
++			meta_add_d_entry(old_dentry->d_parent,
++					 old_dentry->d_name.name,
++					 old_dentry->d_name.len);
++
++			dput(dtohd(old_dentry));
++		}
++		/* if old file is CREATED, we only release the base dentry */
++		if(dtopd(old_dentry)->state == CREATED) {
++			if(dtohd(old_dentry))
++				dput(dtohd(old_dentry));
++		}
++
++		/* now setup the new states (depends on new_dentry state) */
++		/* new dentry state =  MODIFIED */
++		if(dtopd(new_dentry)->state == MODIFIED) {
++			meta_add_d_entry(new_dentry->d_parent,
++					 new_dentry->d_name.name,
++					 new_dentry->d_name.len);
++
++			/* new dentry will be d_put'ed later by the vfs
++			 * so don't do it here
++			 * dput(dtohd(new_dentry));
++			 */
++			dtohd(old_dentry) = NULL;
++			dtopd(old_dentry)->state = DEL_REWRITTEN;
++		}
++		/* new dentry state =  UNMODIFIED */
++		else if(dtopd(new_dentry)->state == UNMODIFIED) {
++			if(get_neg_sto_dentry(new_dentry))
++				return -EINVAL;
++
++			meta_add_d_entry(new_dentry->d_parent,
++					 new_dentry->d_name.name,
++					 new_dentry->d_name.len);
++
++			/* is this right??? */
++			/*dput(dtohd(new_dentry));*/
++			dtohd(old_dentry) = NULL;
++			dtopd(old_dentry)->state = DEL_REWRITTEN;
++		}
++		/* new dentry state =  CREATED */
++		else if(dtopd(new_dentry)->state == CREATED) {
++			/* we keep the neg. base dentry (if exists) */
++			dtohd(old_dentry) = dtohd(new_dentry);
++			/* ...and set it to Null, or we'll get
++			 * dcache.c:345 if it gets dput twice... */
++			dtohd(new_dentry) = NULL;
++			dtopd(old_dentry)->state = CREATED;
++		}
++		/* new dentry state =  NON_EXISTANT */
++		else if(dtopd(new_dentry)->state == NON_EXISTANT) {
++			if(get_neg_sto_dentry(new_dentry))
++				return -EINVAL;
++
++			/* we keep the neg. base dentry (if exists) */
++			dtohd(old_dentry) = dtohd(new_dentry);
++			/* ...and set it to Null, or we'll get 
++			 * Dr. dcache.c:345 if it gets dput twice... */
++			dtohd(new_dentry) = NULL;
++			dtopd(old_dentry)->state = CREATED;
++		}
++		/* new dentry state =  DEL_REWRITTEN or DELETED */
++		else if(dtopd(new_dentry)->state == DEL_REWRITTEN ||
++			dtopd(new_dentry)->state == DELETED) {
++			dtohd(old_dentry) = NULL;
++			dtopd(old_dentry)->state = DEL_REWRITTEN;
++		}
++		else { /* not possible, uhh, ahh */
++			printk(KERN_CRIT 
++			       "mini_fo: rename_reg_file: invalid state detected [1].\n");
++			return -1;
++		}
++		
++		/* now we definitely have a sto file */
++		hidden_old_dentry = dtohd2(old_dentry);
++		hidden_new_dentry = dtohd2(new_dentry);
++
++		dget(hidden_old_dentry);
++		dget(hidden_new_dentry);
++		
++		hidden_old_dir_dentry = dget(hidden_old_dentry->d_parent);
++		hidden_new_dir_dentry = dget(hidden_new_dentry->d_parent);
++		double_lock(hidden_old_dir_dentry, hidden_new_dir_dentry);
++
++		err = vfs_rename(hidden_old_dir_dentry->d_inode, 
++				 hidden_old_dentry,
++				 hidden_new_dir_dentry->d_inode, 
++				 hidden_new_dentry);
++		if(err) 
++			goto out_lock;
++
++		fist_copy_attr_all(new_dir, hidden_new_dir_dentry->d_inode);
++		if (new_dir != old_dir)
++			fist_copy_attr_all(old_dir, hidden_old_dir_dentry->d_inode);
++		
++	out_lock:
++		/* double_unlock will dput the new/old parent dentries 
++		 * whose refcnts were incremented via get_parent above.
++		 */
++		double_unlock(hidden_old_dir_dentry, hidden_new_dir_dentry);
++		dput(hidden_new_dentry);
++		dput(hidden_old_dentry);
++	out:		
++		return err;
++	}
++	else { /* invalid state */
++		printk(KERN_CRIT "mini_fo: rename_reg_file: ERROR: invalid state detected [2].\n");
++		return -1;
++	}
++}
++
++
++STATIC int
++mini_fo_readlink(dentry_t *dentry, char *buf, int bufsiz)
++{
++	int err=0;
++	dentry_t *hidden_dentry = NULL;
++
++	if(dtohd2(dentry) && dtohd2(dentry)->d_inode) {
++		hidden_dentry = dtohd2(dentry);
++	} else if(dtohd(dentry) && dtohd(dentry)->d_inode) {
++		hidden_dentry = dtohd(dentry);
++	} else {
++		goto out;
++	}
++
++	if (!hidden_dentry->d_inode->i_op ||
++	    !hidden_dentry->d_inode->i_op->readlink) {
++		err = -EINVAL;
++		goto out;
++	}
++
++	err = hidden_dentry->d_inode->i_op->readlink(hidden_dentry,
++						     buf,
++						     bufsiz);
++	if (err > 0)
++		fist_copy_attr_atime(dentry->d_inode, hidden_dentry->d_inode);
++
++ out:
++	return err;
++}
++
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
++static int mini_fo_follow_link(dentry_t *dentry, struct nameidata *nd)
++#else
++static void* mini_fo_follow_link(dentry_t *dentry, struct nameidata *nd)
++#endif
++{
++	char *buf;
++	int len = PAGE_SIZE, err;
++	mm_segment_t old_fs;
++
++	/* in 2.6 this is freed by mini_fo_put_link called by __do_follow_link */
++	buf = kmalloc(len, GFP_KERNEL);
++	if (!buf) {
++		err = -ENOMEM;
++		goto out;
++	}
++
++	/* read the symlink, and then we will follow it */
++	old_fs = get_fs();
++	set_fs(KERNEL_DS);
++	err = dentry->d_inode->i_op->readlink(dentry, buf, len);
++	set_fs(old_fs);
++	if (err < 0) {
++		kfree(buf);
++		buf = NULL;
++		goto out;
++	}
++	buf[err] = 0;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++        nd_set_link(nd, buf);
++        err = 0;
++#else
++	err = vfs_follow_link(nd, buf);
++#endif
++
++ out:
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++	kfree(buf);
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
++        return err;
++#else
++        return ERR_PTR(err);
++#endif
++}
++
++STATIC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
++void mini_fo_put_link(struct dentry *dentry, struct nameidata *nd)
++#else
++void mini_fo_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
++#endif
++{
++        char *link;
++        link = nd_get_link(nd);
++        kfree(link);
++}
++#endif
++
++STATIC int
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++mini_fo_permission(inode_t *inode, int mask, struct nameidata *nd)
++#else
++mini_fo_permission(inode_t *inode, int mask)
++#endif
++{
++	inode_t *hidden_inode;
++	int mode;
++	int err;
++
++	if(itohi2(inode)) {
++		hidden_inode = itohi2(inode);
++	} else {
++		hidden_inode = itohi(inode);
++	}
++	mode = inode->i_mode;
++
++	/* not really needed, as permission handles everything:
++	 *	err = vfs_permission(inode, mask);
++	 *	if (err)
++	 *		goto out;
++	 */
++	
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++	err = permission(hidden_inode, mask, nd);
++#else
++	err = permission(hidden_inode, mask);
++#endif
++	
++	/*  out: */
++	return err;
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++STATIC int
++mini_fo_inode_revalidate(dentry_t *dentry)
++{
++	int err = 0;
++	dentry_t *hidden_dentry;
++	inode_t *hidden_inode;
++
++	ASSERT(dentry->d_inode);
++	ASSERT(itopd(dentry->d_inode));
++
++	if(itohi2(dentry->d_inode)) {
++                hidden_dentry = dtohd2(dentry);
++		hidden_inode = hidden_dentry->d_inode;
++	} else if(itohi(dentry->d_inode)) {
++                hidden_dentry = dtohd(dentry);
++		hidden_inode = hidden_dentry->d_inode;
++	} else {
++                printk(KERN_CRIT "mini_fo_inode_revalidate: ERROR, invalid state detected.\n");
++                err = -ENOENT;
++                goto out;
++        }
++	if (hidden_inode && hidden_inode->i_op && hidden_inode->i_op->revalidate){
++		err = hidden_inode->i_op->revalidate(hidden_dentry);
++		if (err)
++			goto out;
++	}
++	fist_copy_attr_all(dentry->d_inode, hidden_inode);
++ out:
++	return err;
++}
++#endif
++
++STATIC int
++mini_fo_setattr(dentry_t *dentry, struct iattr *ia)
++{
++	int err = 0;
++
++	check_mini_fo_dentry(dentry);
++	
++	if(!is_mini_fo_existant(dentry)) {
++		printk(KERN_CRIT "mini_fo_setattr: ERROR, invalid state detected [1].\n");
++		goto out;
++	}
++
++	if(dtost(dentry) == UNMODIFIED) {
++		if(!IS_COPY_FLAG(ia->ia_valid))
++			goto out; /* we ignore these changes to base */
++
++		if(S_ISDIR(dentry->d_inode->i_mode)) {
++			err = dir_unmod_to_mod(dentry);
++		} else {
++			/* we copy contents if the file is not being truncated */
++			if(S_ISREG(dentry->d_inode->i_mode) && 
++			   !(ia->ia_size == 0 && (ia->ia_valid & ATTR_SIZE))) {
++				err = nondir_unmod_to_mod(dentry, 1);
++			} else
++				err = nondir_unmod_to_mod(dentry, 0);
++		}
++		if(err) {
++			err = -EINVAL;
++			printk(KERN_CRIT "mini_fo_setattr: ERROR changing states.\n");
++			goto out;
++		}
++	}
++	if(!exists_in_storage(dentry)) {
++		printk(KERN_CRIT "mini_fo_setattr: ERROR, invalid state detected [2].\n");
++		err = -EINVAL;
++		goto out;
++	}
++	ASSERT(dentry->d_inode);
++	ASSERT(dtohd2(dentry));
++	ASSERT(itopd(dentry->d_inode));
++	ASSERT(itohi2(dentry->d_inode));
++	
++	err = notify_change(dtohd2(dentry), ia);
++	fist_copy_attr_all(dentry->d_inode, itohi2(dentry->d_inode));
++ out:
++	return err;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++STATIC int
++mini_fo_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
++{
++	int err = 0;
++        dentry_t *hidden_dentry;
++
++	ASSERT(dentry->d_inode);
++	ASSERT(itopd(dentry->d_inode));
++
++	if(itohi2(dentry->d_inode)) {
++                hidden_dentry = dtohd2(dentry);
++	} else if(itohi(dentry->d_inode)) {
++                hidden_dentry = dtohd(dentry);
++	} else {
++                printk(KERN_CRIT "mini_fo_getattr: ERROR, invalid state detected.\n");
++                err = -ENOENT;
++                goto out;
++        }
++	fist_copy_attr_all(dentry->d_inode, hidden_dentry->d_inode);
++
++	ASSERT(hidden_dentry);
++	ASSERT(hidden_dentry->d_inode);
++	ASSERT(hidden_dentry->d_inode->i_op);
++
++	generic_fillattr(dentry->d_inode, stat);
++	if (!stat->blksize) {
++		struct super_block *s = hidden_dentry->d_inode->i_sb;
++		unsigned blocks;
++		blocks = (stat->size+s->s_blocksize-1) >> s->s_blocksize_bits;
++		stat->blocks = (s->s_blocksize / 512) * blocks;
++		stat->blksize = s->s_blocksize;
++	}
++ out:
++        return err;
++}
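++/*
++ * Worked example for the fallback block accounting above (illustrative
++ * numbers only): with stat->size == 5000 and s_blocksize == 4096 (12 bits),
++ * blocks == 2, so stat->blocks == (4096 / 512) * 2 == 16 sectors and
++ * stat->blksize == 4096.
++ */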
++#endif
++
++#if defined(XATTR) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20))
++#if 0 /* no xattr_alloc() and xattr_free() */
++/* This is lifted from fs/xattr.c */
++static void *
++xattr_alloc(size_t size, size_t limit)
++{
++	void *ptr;
++
++	if (size > limit)
++		return ERR_PTR(-E2BIG);
++
++	if (!size)	/* size request, no buffer is needed */
++		return NULL;
++	else if (size <= PAGE_SIZE)
++		ptr = kmalloc((unsigned long) size, GFP_KERNEL);
++	else
++		ptr = vmalloc((unsigned long) size);
++	if (!ptr)
++		return ERR_PTR(-ENOMEM);
++	return ptr;
++}
++
++static void
++xattr_free(void *ptr, size_t size)
++{
++	if (!size)	/* size request, no buffer was needed */
++		return;
++	else if (size <= PAGE_SIZE)
++		kfree(ptr);
++	else
++		vfree(ptr);
++}
++#endif /* no xattr_alloc() and xattr_free() */
++
++/* BKL held by caller.
++ * dentry->d_inode->i_sem down
++ */
++STATIC int
++mini_fo_getxattr(struct dentry *dentry, const char *name, void *value, size_t size) {
++	struct dentry *hidden_dentry = NULL;
++	int err = -EOPNOTSUPP;
++	/* Define these anyway so we don't need as much ifdef'ed code. */
++	char *encoded_name = NULL;
++	char *encoded_value = NULL;
++
++	check_mini_fo_dentry(dentry);
++
++	if(exists_in_storage(dentry))
++		hidden_dentry = dtohd2(dentry);
++	else
++		hidden_dentry = dtohd(dentry);
++	   
++	ASSERT(hidden_dentry);
++	ASSERT(hidden_dentry->d_inode);
++	ASSERT(hidden_dentry->d_inode->i_op);
++
++	if (hidden_dentry->d_inode->i_op->getxattr) {
++		encoded_name = (char *)name;
++		encoded_value = (char *)value;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_lock(&hidden_dentry->d_inode->i_mutex);
++#else
++		down(&hidden_dentry->d_inode->i_sem);
++#endif
++		/* lock_kernel() already done by caller. */
++		err = hidden_dentry->d_inode->i_op->getxattr(hidden_dentry, encoded_name, encoded_value, size);
++		/* unlock_kernel() will be done by caller. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_unlock(&hidden_dentry->d_inode->i_mutex);
++#else
++		up(&hidden_dentry->d_inode->i_sem);
++#endif
++	}
++	return err;
++}
++
++/* BKL held by caller.
++ * dentry->d_inode->i_sem down
++ */
++STATIC int
++#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,21) \
++     && LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,23)) \
++     || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++mini_fo_setxattr(struct dentry *dentry, const char *name, 
++		 const void *value, size_t size, int flags)
++#else
++mini_fo_setxattr(struct dentry *dentry, const char *name, 
++		 void *value, size_t size, int flags)
++#endif
++
++{
++	struct dentry *hidden_dentry = NULL;
++	int err = -EOPNOTSUPP;
++
++	/* Define these anyway, so we don't have as much ifdef'ed code. */
++	char *encoded_value = NULL;
++	char *encoded_name = NULL;
++
++	check_mini_fo_dentry(dentry);
++
++	if(exists_in_storage(dentry))
++		hidden_dentry = dtohd2(dentry);
++	else
++		hidden_dentry = dtohd(dentry);
++	
++	ASSERT(hidden_dentry);
++	ASSERT(hidden_dentry->d_inode);
++	ASSERT(hidden_dentry->d_inode->i_op);
++
++	if (hidden_dentry->d_inode->i_op->setxattr) {
++		encoded_name = (char *)name;
++		encoded_value = (char *)value;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_lock(&hidden_dentry->d_inode->i_mutex);
++#else
++		down(&hidden_dentry->d_inode->i_sem);
++#endif
++		/* lock_kernel() already done by caller. */
++		err = hidden_dentry->d_inode->i_op->setxattr(hidden_dentry, encoded_name, encoded_value, size, flags);
++		/* unlock_kernel() will be done by caller. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_unlock(&hidden_dentry->d_inode->i_mutex);
++#else
++		up(&hidden_dentry->d_inode->i_sem);
++#endif
++	}
++	return err;
++}
++
++/* BKL held by caller.
++ * dentry->d_inode->i_sem down
++ */
++STATIC int
++mini_fo_removexattr(struct dentry *dentry, const char *name) {
++	struct dentry *hidden_dentry = NULL;
++	int err = -EOPNOTSUPP;
++	char *encoded_name;
++
++	check_mini_fo_dentry(dentry);
++
++	if(exists_in_storage(dentry))
++		hidden_dentry = dtohd2(dentry);
++	else
++		hidden_dentry = dtohd(dentry);
++	
++	ASSERT(hidden_dentry);
++	ASSERT(hidden_dentry->d_inode);
++	ASSERT(hidden_dentry->d_inode->i_op);
++
++	if (hidden_dentry->d_inode->i_op->removexattr) {
++		encoded_name = (char *)name;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_lock(&hidden_dentry->d_inode->i_mutex);
++#else
++		down(&hidden_dentry->d_inode->i_sem);
++#endif
++		/* lock_kernel() already done by caller. */
++		err = hidden_dentry->d_inode->i_op->removexattr(hidden_dentry, encoded_name);
++		/* unlock_kernel() will be done by caller. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_unlock(&hidden_dentry->d_inode->i_mutex);
++#else
++		up(&hidden_dentry->d_inode->i_sem);
++#endif
++	}
++	return err;
++}
++
++/* BKL held by caller.
++ * dentry->d_inode->i_sem down
++ */
++STATIC int
++mini_fo_listxattr(struct dentry *dentry, char *list, size_t size) {
++	struct dentry *hidden_dentry = NULL;
++	int err = -EOPNOTSUPP;
++	char *encoded_list = NULL;
++
++	check_mini_fo_dentry(dentry);
++
++	if(exists_in_storage(dentry))
++		hidden_dentry = dtohd2(dentry);
++	else
++		hidden_dentry = dtohd(dentry);
++
++	ASSERT(hidden_dentry);
++	ASSERT(hidden_dentry->d_inode);
++	ASSERT(hidden_dentry->d_inode->i_op);
++
++	if (hidden_dentry->d_inode->i_op->listxattr) {
++		encoded_list = list;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_lock(&hidden_dentry->d_inode->i_mutex);
++#else
++		down(&hidden_dentry->d_inode->i_sem);
++#endif
++		/* lock_kernel() already done by caller. */
++		err = hidden_dentry->d_inode->i_op->listxattr(hidden_dentry, encoded_list, size);
++		/* unlock_kernel() will be done by caller. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_unlock(&hidden_dentry->d_inode->i_mutex);
++#else
++		up(&hidden_dentry->d_inode->i_sem);
++#endif
++	}
++	return err;
++}
++# endif /* defined(XATTR) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20)) */
++
++struct inode_operations mini_fo_symlink_iops =
++	{
++		readlink:	mini_fo_readlink,
++		follow_link: mini_fo_follow_link,
++		/* mk: permission:	mini_fo_permission, */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++		revalidate:	mini_fo_inode_revalidate,
++#endif
++		setattr:	mini_fo_setattr,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++		getattr:	mini_fo_getattr,
++		put_link:       mini_fo_put_link,
++#endif
++
++#if defined(XATTR) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20))
++		setxattr:	mini_fo_setxattr,
++		getxattr:	mini_fo_getxattr,
++		listxattr:	mini_fo_listxattr,
++		removexattr: mini_fo_removexattr
++# endif /* defined(XATTR) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20)) */
++	};
++
++struct inode_operations mini_fo_dir_iops =
++	{
++		create:	mini_fo_create,
++		lookup:	mini_fo_lookup,
++		link:	mini_fo_link,
++		unlink:	mini_fo_unlink,
++		symlink:	mini_fo_symlink,
++		mkdir:	mini_fo_mkdir,
++		rmdir:	mini_fo_rmdir,
++		mknod:	mini_fo_mknod,
++		rename:	mini_fo_rename,
++		/* no readlink/follow_link for non-symlinks */
++		// off because we have setattr
++		//    truncate:	mini_fo_truncate,
++		/* mk:permission:	mini_fo_permission, */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++		revalidate:	mini_fo_inode_revalidate,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++		getattr:	mini_fo_getattr,
++#endif
++		setattr:	mini_fo_setattr,
++#if defined(XATTR) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20))
++		setxattr:	mini_fo_setxattr,
++		getxattr:	mini_fo_getxattr,
++		listxattr:	mini_fo_listxattr,
++		removexattr: mini_fo_removexattr
++# endif /* XATTR && LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) */
++	};
++
++struct inode_operations mini_fo_main_iops =
++	{
++		/* permission:	mini_fo_permission, */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++		revalidate:	mini_fo_inode_revalidate,
++#endif
++		setattr:	mini_fo_setattr,
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++		getattr:	mini_fo_getattr,
++#endif
++#if defined(XATTR) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20))
++		setxattr:	mini_fo_setxattr,
++		getxattr:	mini_fo_getxattr,
++		listxattr:	mini_fo_listxattr,
++		removexattr:    mini_fo_removexattr
++# endif /* XATTR && LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) */
++	};
+diff -urN linux-2.6.21.1.old/fs/mini_fo/main.c linux-2.6.21.1.dev/fs/mini_fo/main.c
+--- linux-2.6.21.1.old/fs/mini_fo/main.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/main.c	2007-05-26 21:01:26.164329720 +0200
+@@ -0,0 +1,423 @@
++/*
++ * Copyright (c) 1997-2003 Erez Zadok
++ * Copyright (c) 2001-2003 Stony Brook University
++ *
++ * For specific licensing information, see the COPYING file distributed with
++ * this package, or get one from ftp://ftp.filesystems.org/pub/fist/COPYING.
++ *
++ * This Copyright notice must be kept intact and distributed with all
++ * fistgen sources INCLUDING sources generated by fistgen.
++ */
++/*
++ * Copyright (C) 2004, 2005 Markus Klotzbuecher <mk@creamnet.de>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++/*
++ *  $Id$
++ */
++
++#ifdef HAVE_CONFIG_H
++# include <config.h>
++#endif
++
++#include "fist.h"
++#include "mini_fo.h"
++#include <linux/module.h>
++
++/* This definition must only appear after we include <linux/module.h> */
++#ifndef MODULE_LICENSE
++# define MODULE_LICENSE(bison)
++#endif /* not MODULE_LICENSE */
++
++/*
++ * This is the mini_fo tri interpose function, which extends the
++ * functionality of the regular interpose by interposing a higher
++ * level inode on top of two lower level ones: the base filesystem
++ * inode and the storage filesystem inode.
++ *
++ *  sb we pass is mini_fo's super_block
++ */
++int
++mini_fo_tri_interpose(dentry_t *hidden_dentry,
++		      dentry_t *hidden_sto_dentry,
++		      dentry_t *dentry, super_block_t *sb, int flag)
++{
++	inode_t *hidden_inode = NULL;
++	inode_t *hidden_sto_inode = NULL; /* store corresponding storage inode */
++	int err = 0;
++	inode_t *inode;
++
++	/* Pointer to hidden_sto_inode if exists, else to hidden_inode.
++	 * This is used to copy the attributes of the correct inode. */
++	inode_t *master_inode;
++
++	if(hidden_dentry)
++		hidden_inode = hidden_dentry->d_inode;
++	if(hidden_sto_dentry)
++		hidden_sto_inode = hidden_sto_dentry->d_inode;
++
++	ASSERT(dentry->d_inode == NULL);
++
++	/* mk: One of the inodes associated with the dentries is likely to
++	 * be NULL, so careful:
++	 */
++	ASSERT((hidden_inode != NULL) || (hidden_sto_inode != NULL));
++
++	if(hidden_sto_inode)
++		master_inode = hidden_sto_inode;
++	else
++		master_inode = hidden_inode;
++
++	/*
++	 * We allocate our new inode below, by calling iget.
++	 * iget will call our read_inode which will initialize some
++	 * of the new inode's fields
++	 */
++
++	/*
++	 * original: inode = iget(sb, hidden_inode->i_ino);
++	 */
++	inode = iget(sb, iunique(sb, 25));
++	if (!inode) {
++		err = -EACCES;		/* should be impossible??? */
++		goto out;
++	}
++
++	/*
++	 * interpose the inode if not already interposed
++	 *   this is possible if the inode is being reused
++	 * XXX: what happens if we get_empty_inode() but there's another already?
++	 * for now, ASSERT() that this can't happen; fix later.
++	 */
++	if (itohi(inode) != NULL) {
++		printk(KERN_CRIT "mini_fo_tri_interpose: itohi(inode) != NULL.\n");
++	}
++	if (itohi2(inode) != NULL) {
++		printk(KERN_CRIT "mini_fo_tri_interpose: itohi2(inode) != NULL.\n");
++	}
++
++	/* mk: Careful, igrab can't handle NULL inodes (ok, why should it?), so
++	 * we need to check here:
++	 */
++	if(hidden_inode)
++		itohi(inode) = igrab(hidden_inode);
++	else
++		itohi(inode) = NULL;
++
++	if(hidden_sto_inode)
++		itohi2(inode) = igrab(hidden_sto_inode);
++	else
++		itohi2(inode) = NULL;
++
++
++	/* Use different set of inode ops for symlinks & directories*/
++	if (S_ISLNK(master_inode->i_mode))
++		inode->i_op = &mini_fo_symlink_iops;
++	else if (S_ISDIR(master_inode->i_mode))
++		inode->i_op = &mini_fo_dir_iops;
++
++	/* Use different set of file ops for directories */
++	if (S_ISDIR(master_inode->i_mode))
++		inode->i_fop = &mini_fo_dir_fops;
++
++	/* properly initialize special inodes */
++	if (S_ISBLK(master_inode->i_mode) || S_ISCHR(master_inode->i_mode) ||
++	    S_ISFIFO(master_inode->i_mode) || S_ISSOCK(master_inode->i_mode)) {
++		init_special_inode(inode, master_inode->i_mode, master_inode->i_rdev);
++	}
++
++	/* Fix our inode's address operations to that of the lower inode */
++	if (inode->i_mapping->a_ops != master_inode->i_mapping->a_ops) {
++		inode->i_mapping->a_ops = master_inode->i_mapping->a_ops;
++	}
++
++	/* only (our) lookup wants to do a d_add */
++	if (flag)
++		d_add(dentry, inode);
++	else
++		d_instantiate(dentry, inode);
++
++	ASSERT(dtopd(dentry) != NULL);
++
++	/* all well, copy inode attributes */
++	fist_copy_attr_all(inode, master_inode);
++
++ out:
++	return err;
++}
++
++/* parse mount options "base=" and "sto=" */
++dentry_t *
++mini_fo_parse_options(super_block_t *sb, char *options)
++{
++	dentry_t *hidden_root = ERR_PTR(-EINVAL);
++	dentry_t *hidden_root2 = ERR_PTR(-EINVAL);
++	struct nameidata nd, nd2; 
++	char *name, *tmp, *end;
++	int err = 0;
++
++	/* We don't want to go off the end of our arguments later on. */
++	for (end = options; *end; end++);
++
++	while (options < end) {
++		tmp = options;
++		while (*tmp && *tmp != ',')
++			tmp++;
++		*tmp = '\0';
++		if (!strncmp("base=", options, 5)) {
++			name = options + 5;
++			printk(KERN_INFO "mini_fo: using base directory: %s\n", name);
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++			if (path_init(name, LOOKUP_FOLLOW, &nd))
++				err = path_walk(name, &nd);
++#else
++			err = path_lookup(name, LOOKUP_FOLLOW, &nd);
++#endif
++			if (err) {
++				printk(KERN_CRIT "mini_fo: error accessing hidden directory '%s'\n", name);
++				hidden_root = ERR_PTR(err);
++				goto out;
++			}
++			hidden_root = nd.dentry;
++			stopd(sb)->base_dir_dentry = nd.dentry;
++			stopd(sb)->hidden_mnt = nd.mnt;
++
++		} else if(!strncmp("sto=", options, 4)) {
++			/* parse the storage dir */
++			name = options + 4;
++			printk(KERN_INFO "mini_fo: using storage directory: %s\n", name);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++			if(path_init(name, LOOKUP_FOLLOW, &nd2))
++				err = path_walk(name, &nd2);
++#else
++                        err = path_lookup(name, LOOKUP_FOLLOW, &nd2);
++#endif
++			if(err) {
++				printk(KERN_CRIT "mini_fo: error accessing hidden storage directory '%s'\n", name);
++
++				hidden_root2 = ERR_PTR(err);
++				goto out;
++			}
++			hidden_root2 = nd2.dentry;
++			stopd(sb)->storage_dir_dentry = nd2.dentry;
++			stopd(sb)->hidden_mnt2 = nd2.mnt;
++			stohs2(sb) = hidden_root2->d_sb;
++
++			/* validate storage dir, this is done in 
++			 * mini_fo_read_super for the base directory.
++			 */
++			if (IS_ERR(hidden_root2)) {
++				printk(KERN_WARNING "mini_fo_parse_options: storage dentry lookup failed (err = %ld)\n", PTR_ERR(hidden_root2));
++				goto out;
++			}
++			if (!hidden_root2->d_inode) {
++				printk(KERN_WARNING "mini_fo_parse_options: no storage dir to interpose on.\n");
++				goto out;
++			}
++			stohs2(sb) = hidden_root2->d_sb;
++		} else {
++			printk(KERN_WARNING "mini_fo: unrecognized option '%s'\n", options);
++			hidden_root = ERR_PTR(-EINVAL);
++			goto out;
++		}
++		options = tmp + 1;
++	}
++
++ out:
++	if(IS_ERR(hidden_root2))
++		return hidden_root2;
++	return hidden_root;
++}
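++/*
++ * A sketch of the expected option string (the exact mount invocation depends
++ * on the userspace tools in use; the paths below are purely illustrative):
++ *
++ *   mount -t mini_fo -o base=/rom,sto=/jffs mini_fo /mnt/overlay
++ *
++ * which hands "base=/rom,sto=/jffs" to mini_fo_parse_options() via raw_data.
++ */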
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++static int
++#else
++super_block_t *
++#endif
++mini_fo_read_super(super_block_t *sb, void *raw_data, int silent)
++{
++	dentry_t *hidden_root;
++	int err = 0;
++
++	if (!raw_data) {
++		printk(KERN_WARNING "mini_fo_read_super: missing argument\n");
++		err = -EINVAL;
++		goto out;
++	}
++	/*
++	 * Allocate superblock private data
++	 */
++	__stopd(sb) = kmalloc(sizeof(struct mini_fo_sb_info), GFP_KERNEL);
++	if (!stopd(sb)) {
++		printk(KERN_WARNING "%s: out of memory\n", __FUNCTION__);
++		err = -ENOMEM;
++		goto out;
++	}
++	stohs(sb) = NULL;
++
++	hidden_root = mini_fo_parse_options(sb, raw_data);
++	if (IS_ERR(hidden_root)) {
++		printk(KERN_WARNING "mini_fo_read_super: lookup_dentry failed (err = %ld)\n", PTR_ERR(hidden_root));
++		err = PTR_ERR(hidden_root);
++		goto out_free;
++	}
++	if (!hidden_root->d_inode) {
++		printk(KERN_WARNING "mini_fo_read_super: no directory to interpose on\n");
++		goto out_free;
++	}
++	stohs(sb) = hidden_root->d_sb;
++
++	/*
++	 * Linux 2.4.2-ac3 and beyond has code in
++	 * mm/filemap.c:generic_file_write() that requires sb->s_maxbytes
++	 * to be populated.  If not set, all write()s under that sb will
++	 * return 0.
++	 *
++	 * Linux 2.4.4+ automatically sets s_maxbytes to MAX_NON_LFS;
++	 * the filesystem should override it only if it supports LFS.
++	 */
++	/* non-SCA code is good to go with LFS */
++	sb->s_maxbytes = hidden_root->d_sb->s_maxbytes;
++
++	sb->s_op = &mini_fo_sops;
++	/*
++	 * we can't use d_alloc_root if we want to use
++	 * our own interpose function unchanged,
++	 * so we simply replicate *most* of the code in d_alloc_root here
++	 */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++	sb->s_root = d_alloc(NULL, &(const struct qstr) { "/", 1, 0 });
++#else
++	sb->s_root = d_alloc(NULL, &(const struct qstr){hash: 0, name: "/", len : 1});
++#endif
++	if (IS_ERR(sb->s_root)) {
++		printk(KERN_WARNING "mini_fo_read_super: d_alloc failed\n");
++		err = -ENOMEM;
++		goto out_dput;
++	}
++
++	sb->s_root->d_op = &mini_fo_dops;
++	sb->s_root->d_sb = sb;
++	sb->s_root->d_parent = sb->s_root;
++
++	/* link the upper and lower dentries */
++	__dtopd(sb->s_root) = (struct mini_fo_dentry_info *) 
++		kmalloc(sizeof(struct mini_fo_dentry_info), GFP_KERNEL);
++	if (!dtopd(sb->s_root)) {
++		err = -ENOMEM;
++		goto out_dput2;
++	}
++	dtopd(sb->s_root)->state = MODIFIED;
++	dtohd(sb->s_root) = hidden_root;
++
++	/* fanout relevant, interpose on storage root dentry too */
++	dtohd2(sb->s_root) = stopd(sb)->storage_dir_dentry;
++
++	/* ...and call tri-interpose to interpose root dir inodes
++	 * if (mini_fo_interpose(hidden_root, sb->s_root, sb, 0))
++	 */
++	if(mini_fo_tri_interpose(hidden_root, dtohd2(sb->s_root), sb->s_root, sb, 0))
++		goto out_dput2;
++
++	/* initialize the wol list */
++	itopd(sb->s_root->d_inode)->deleted_list_size = -1;
++	itopd(sb->s_root->d_inode)->renamed_list_size = -1;
++	meta_build_lists(sb->s_root);
++
++	goto out;
++
++ out_dput2:
++	dput(sb->s_root);
++ out_dput:
++	dput(hidden_root);
++	dput(dtohd2(sb->s_root)); /* release the hidden_sto_dentry too */
++ out_free:
++	kfree(stopd(sb));
++	__stopd(sb) = NULL;
++ out:
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++        return err;
++#else
++        if (err) {
++		return ERR_PTR(err);
++        } else {
++		return sb;
++        }
++#endif
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++static int mini_fo_get_sb(struct file_system_type *fs_type,
++					  int flags, const char *dev_name,
++					  void *raw_data, struct vfsmount *mnt) 
++{
++	return get_sb_nodev(fs_type, flags, raw_data, mini_fo_read_super, mnt);
++}
++#else
++static struct super_block *mini_fo_get_sb(struct file_system_type *fs_type,
++					  int flags, const char *dev_name,
++					  void *raw_data) 
++{
++	return get_sb_nodev(fs_type, flags, raw_data, mini_fo_read_super);
++}
++#endif
++
++void mini_fo_kill_block_super(struct super_block *sb)
++{
++	generic_shutdown_super(sb);
++	/*
++	 *      XXX: BUG: Halcrow: Things get unstable sometime after this point:
++	 *      lib/rwsem-spinlock.c:127: spin_is_locked on uninitialized
++	 *      fs/fs-writeback.c:402: spin_lock(fs/super.c:a0381828) already
++	 *      locked by fs/fs-writeback.c/402
++	 *
++	 *      Apparently, someone's not releasing a lock on sb_lock...
++	 */
++}
++
++static struct file_system_type mini_fo_fs_type = {
++	.owner          = THIS_MODULE,
++	.name           = "mini_fo",
++	.get_sb         = mini_fo_get_sb,
++	.kill_sb        = mini_fo_kill_block_super,
++	.fs_flags       = 0,
++};
++
++
++#else
++static DECLARE_FSTYPE(mini_fo_fs_type, "mini_fo", mini_fo_read_super, 0);
++#endif
++
++static int __init init_mini_fo_fs(void)
++{
++	printk("Registering mini_fo version $Id$\n");
++	return register_filesystem(&mini_fo_fs_type);
++}
++static void __exit exit_mini_fo_fs(void)
++{
++	printk("Unregistering mini_fo version $Id$\n");
++	unregister_filesystem(&mini_fo_fs_type);
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++EXPORT_NO_SYMBOLS;
++#endif
++
++MODULE_AUTHOR("Erez Zadok <ezk@cs.sunysb.edu>");
++MODULE_DESCRIPTION("FiST-generated mini_fo filesystem");
++MODULE_LICENSE("GPL");
++
++/* MODULE_PARM(fist_debug_var, "i"); */
++/* MODULE_PARM_DESC(fist_debug_var, "Debug level"); */
++
++module_init(init_mini_fo_fs)
++module_exit(exit_mini_fo_fs)
+diff -urN linux-2.6.21.1.old/fs/mini_fo/Makefile linux-2.6.21.1.dev/fs/mini_fo/Makefile
+--- linux-2.6.21.1.old/fs/mini_fo/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/Makefile	2007-05-26 21:01:26.164329720 +0200
+@@ -0,0 +1,17 @@
++#
++# Makefile for mini_fo 2.4 and 2.6 Linux kernels
++#
++# Copyright (C) 2004, 2005 Markus Klotzbuecher <mk@creamnet.de>
++#
++# This program is free software; you can redistribute it and/or
++# modify it under the terms of the GNU General Public License
++# as published by the Free Software Foundation; either version
++# 2 of the License, or (at your option) any later version.
++#
++
++obj-$(CONFIG_MINI_FO) := mini_fo.o
++mini_fo-objs   := meta.o dentry.o file.o inode.o main.o super.o state.o aux.o
++
++# dependencies
++${mini_fo-objs}: mini_fo.h fist.h
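++
++# Build note (a sketch, not taken from the original sources): with
++# CONFIG_MINI_FO=m this produces a loadable mini_fo module ("modprobe mini_fo"
++# on 2.6 kernels, insmod of mini_fo.o on 2.4).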
++
+diff -urN linux-2.6.21.1.old/fs/mini_fo/meta.c linux-2.6.21.1.dev/fs/mini_fo/meta.c
+--- linux-2.6.21.1.old/fs/mini_fo/meta.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/meta.c	2007-05-26 21:01:26.166329416 +0200
+@@ -0,0 +1,1000 @@
++/*
++ * Copyright (C) 2004, 2005 Markus Klotzbuecher <mk@creamnet.de>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifdef HAVE_CONFIG_H
++# include <config.h>
++#endif /* HAVE_CONFIG_H */
++#include "fist.h"
++#include "mini_fo.h"
++
++int meta_build_lists(dentry_t *dentry) 
++{
++	struct mini_fo_inode_info *inode_info;
++
++	dentry_t *meta_dentry = 0;
++	file_t *meta_file = 0;
++	mm_segment_t old_fs;
++	void *buf;
++
++	int bytes, len;
++	struct vfsmount *meta_mnt;
++	char *entry;
++
++	inode_info = itopd(dentry->d_inode);
++	if(!(inode_info->deleted_list_size == -1 &&
++	     inode_info->renamed_list_size == -1)) {
++		printk(KERN_CRIT "mini_fo: meta_build_lists: \
++                                  Error, list(s) already initialized.\n");
++		return -1;
++	}
++
++	/* init our meta lists */
++	INIT_LIST_HEAD(&inode_info->deleted_list);
++	inode_info->deleted_list_size = 0;
++
++	INIT_LIST_HEAD(&inode_info->renamed_list);
++	inode_info->renamed_list_size = 0;
++
++  	/* might there be a META-file? */
++	if(dtohd2(dentry) && dtohd2(dentry)->d_inode) {
++		meta_dentry = lookup_one_len(META_FILENAME,
++					     dtohd2(dentry), 
++					     strlen(META_FILENAME));
++		if(!meta_dentry->d_inode) {
++			dput(meta_dentry);
++			goto out_ok;
++		}
++		/* $%& err, is this correct? */
++		meta_mnt = stopd(dentry->d_inode->i_sb)->hidden_mnt2;
++		mntget(meta_mnt);
++		
++
++		/* open META-file for reading */
++		meta_file = dentry_open(meta_dentry, meta_mnt, 0x0);
++		if(!meta_file || IS_ERR(meta_file)) {
++			printk(KERN_CRIT "mini_fo: meta_build_lists: \
++                                          ERROR opening META file.\n");
++			goto out_err;
++		}
++
++		/* check if fs supports reading */
++		if(!meta_file->f_op->read) {
++			printk(KERN_CRIT "mini_fo: meta_build_lists: \
++                                          ERROR, fs does not support reading.\n");
++			goto out_err_close;
++		}
++
++		/* allocate a page for transferring the data */
++		buf = (void *) __get_free_page(GFP_KERNEL);
++		if(!buf) {
++			printk(KERN_CRIT "mini_fo: meta_build_lists: \
++                                          ERROR, out of mem.\n");
++			goto out_err_close;
++		}
++		meta_file->f_pos = 0;
++		old_fs = get_fs();
++		set_fs(KERNEL_DS);
++		do {
++			char *c;
++			bytes = meta_file->f_op->read(meta_file, buf, PAGE_SIZE, &meta_file->f_pos);
++			if(bytes == PAGE_SIZE) {
++				/* trim a cut-off filename and rewind f_pos so it is read completely next time */
++				for(c = (char*) buf+PAGE_SIZE;
++				    *c != '\n';
++				    c--, bytes--, meta_file->f_pos--);
++			}
++			entry = (char *) buf;
++			while(entry < (char *) buf+bytes) {
++
++				char *old_path;
++				char *dir_name;
++				int old_len, new_len;
++
++				/* len without '\n'*/
++				len = (int) (strchr(entry, '\n') - entry);
++				switch (*entry) {
++				case 'D':
++					/* format: "D filename" */
++					meta_list_add_d_entry(dentry, 
++							      entry+2, 
++							      len-2);
++					break;
++				case 'R':
++					/* format: "R path/xy/dir newDir" */
++					old_path = entry+2;
++					dir_name = strchr(old_path, ' ') + 1;
++					old_len =  dir_name - old_path - 1;
++					new_len = ((int) entry) + len - ((int ) dir_name);
++					meta_list_add_r_entry(dentry, 
++							      old_path, 
++							      old_len,
++							      dir_name, 
++							      new_len);
++					break;
++				default:
++					/* unknown entry type detected */
++					break;
++				}
++				entry += len+1;
++			}
++
++		} while(meta_file->f_pos < meta_dentry->d_inode->i_size);
++
++		free_page((unsigned long) buf);
++		set_fs(old_fs);
++		fput(meta_file);
++	}
++	goto out_ok;
++
++ out_err_close:
++	fput(meta_file);
++ out_err:
++	mntput(meta_mnt);
++	dput(meta_dentry);
++	return -1;
++ out_ok:
++	return 1; /* check this!!! inode_info->wol_size; */ 
++}
++
++/* cleans up all lists and frees the memory, by dentry */
++int meta_put_lists(dentry_t *dentry) 
++{
++	if(!dentry || !dentry->d_inode) {
++		printk("mini_fo: meta_put_lists: invalid dentry passed.\n");
++		return -1;
++	}
++	return __meta_put_lists(dentry->d_inode);
++}
++
++/* cleans up all lists and frees the memory, by inode */
++int __meta_put_lists(inode_t *inode) 
++{
++	int err = 0;
++	if(!inode || !itopd(inode)) {
++		printk("mini_fo: __meta_put_lists: invalid inode passed.\n");
++		return -1;
++	}
++	err = __meta_put_d_list(inode);
++	err |= __meta_put_r_list(inode);
++	return err;
++}
++
++int meta_sync_lists(dentry_t *dentry)
++{
++	int err = 0;
++	if(!dentry || !dentry->d_inode) {
++		printk("mini_fo: meta_sync_lists: \
++                        invalid dentry passed.\n");
++		return -1;
++	}
++	err = meta_sync_d_list(dentry, 0);
++	err |= meta_sync_r_list(dentry, 1);
++	return err;
++}
++
++
++/* remove all D entries from the deleted list and free the memory */
++int __meta_put_d_list(inode_t *inode) 
++{
++	struct list_head *tmp;
++        struct deleted_entry *del_entry;
++        struct mini_fo_inode_info *inode_info;
++	
++	if(!inode || !itopd(inode)) {
++		printk(KERN_CRIT "mini_fo: __meta_put_d_list: \
++                                  invalid inode passed.\n");
++		return -1;
++	}
++	inode_info = itopd(inode);
++	
++        /* nuke the DELETED-list */
++        if(inode_info->deleted_list_size <= 0)
++		return 0;
++
++	while(!list_empty(&inode_info->deleted_list)) {
++		tmp = inode_info->deleted_list.next;
++		list_del(tmp);
++		del_entry = list_entry(tmp, struct deleted_entry, list);
++		kfree(del_entry->name);
++		kfree(del_entry);
++	}
++	inode_info->deleted_list_size = 0;
++	
++	return 0;
++}
++
++/* remove all R entries from the renamed list and free the memory */
++int __meta_put_r_list(inode_t *inode) 
++{
++	struct list_head *tmp;
++	struct renamed_entry *ren_entry;
++        struct mini_fo_inode_info *inode_info;
++	
++	if(!inode || !itopd(inode)) {
++		printk(KERN_CRIT "mini_fo: meta_put_r_list: invalid inode.\n");
++		return -1;
++	}
++	inode_info = itopd(inode);
++	
++        /* nuke the RENAMED-list */
++        if(inode_info->renamed_list_size <= 0) 
++		return 0;
++
++	while(!list_empty(&inode_info->renamed_list)) {
++		tmp = inode_info->renamed_list.next;
++		list_del(tmp);
++		ren_entry = list_entry(tmp, struct renamed_entry, list);
++		kfree(ren_entry->new_name);
++		kfree(ren_entry->old_name);
++		kfree(ren_entry);
++	}
++	inode_info->renamed_list_size = 0;
++	
++	return 0;
++}
++
++int meta_add_d_entry(dentry_t *dentry, const char *name, int len)
++{
++	int err = 0;
++	err = meta_list_add_d_entry(dentry, name, len);
++	err |= meta_write_d_entry(dentry,name,len);
++	return err;	
++}
++
++/* add a D entry to the deleted list */
++int meta_list_add_d_entry(dentry_t *dentry, const char *name, int len) 
++{
++        struct deleted_entry *del_entry;
++        struct mini_fo_inode_info *inode_info;
++
++	if(!dentry || !dentry->d_inode) {
++		printk(KERN_CRIT "mini_fo: meta_list_add_d_entry: \
++                                  invalid dentry passed.\n");
++		return -1;
++	}
++	inode_info = itopd(dentry->d_inode);
++
++        if(inode_info->deleted_list_size < 0)
++                return -1;
++
++        del_entry = (struct deleted_entry *)
++		kmalloc(sizeof(struct deleted_entry), GFP_KERNEL);
++        if(!del_entry) {
++                printk(KERN_CRIT "mini_fo: meta_list_add_d_entry: \
++                                  out of mem.\n");
++                return -ENOMEM;
++        }
++        del_entry->name = (char*) kmalloc(len, GFP_KERNEL);
++        if(!del_entry->name) {
++                printk(KERN_CRIT "mini_fo: meta_list_add_d_entry: \
++                                  out of mem.\n");
++		kfree(del_entry);
++                return -ENOMEM;
++        }
++
++        strncpy(del_entry->name, name, len);
++        del_entry->len = len;
++
++        list_add(&del_entry->list, &inode_info->deleted_list);
++        inode_info->deleted_list_size++;
++        return 0;
++}
++
++int meta_add_r_entry(dentry_t *dentry, 
++			  const char *old_name, int old_len, 
++			  const char *new_name, int new_len)
++{
++	int err = 0;
++	err = meta_list_add_r_entry(dentry, 
++				    old_name, old_len,
++				    new_name, new_len);
++	err |= meta_write_r_entry(dentry,
++				  old_name, old_len,
++				  new_name, new_len);
++	return err;
++}
++
++/* add a R entry to the renamed list */
++int meta_list_add_r_entry(dentry_t *dentry, 
++			  const char *old_name, int old_len, 
++			  const char *new_name, int new_len)
++{
++        struct renamed_entry *ren_entry;
++        struct mini_fo_inode_info *inode_info;
++
++	if(!dentry || !dentry->d_inode) {
++		printk(KERN_CRIT "mini_fo: meta_list_add_r_entry: \
++                                  invalid dentry passed.\n");
++		return -1;
++	}
++	inode_info = itopd(dentry->d_inode);
++
++        if(inode_info->renamed_list_size < 0)
++                return -1;
++
++        ren_entry = (struct renamed_entry *)
++		kmalloc(sizeof(struct renamed_entry), GFP_KERNEL);
++        if(!ren_entry) {
++                printk(KERN_CRIT "mini_fo: meta_list_add_r_entry: \
++                                  out of mem.\n");
++                return -ENOMEM;
++        }
++        ren_entry->old_name = (char*) kmalloc(old_len, GFP_KERNEL);
++        ren_entry->new_name = (char*) kmalloc(new_len, GFP_KERNEL);
++
++        if(!ren_entry->old_name || !ren_entry->new_name) {
++                printk(KERN_CRIT "mini_fo: meta_list_add_r_entry: \
++                                  out of mem.\n");
++		kfree(ren_entry->new_name);
++		kfree(ren_entry->old_name);
++		kfree(ren_entry);
++                return -ENOMEM;
++        }
++
++        strncpy(ren_entry->old_name, old_name, old_len);
++        ren_entry->old_len = old_len;
++        strncpy(ren_entry->new_name, new_name, new_len);
++        ren_entry->new_len = new_len;
++
++        list_add(&ren_entry->list, &inode_info->renamed_list);
++        inode_info->renamed_list_size++;
++        return 0;
++}
++
++
++int meta_remove_r_entry(dentry_t *dentry, const char *name, int len)
++{
++	int err = 0;
++	if(!dentry || !dentry->d_inode) {
++		printk(KERN_CRIT 
++		       "mini_fo: meta_remove_r_entry: \
++                        invalid dentry passed.\n");
++		return -1;
++	}
++
++	err = meta_list_remove_r_entry(dentry, name, len);
++	err |= meta_sync_lists(dentry);
++	return err;
++}
++
++int meta_list_remove_r_entry(dentry_t *dentry, const char *name, int len)
++{
++	if(!dentry || !dentry->d_inode) {
++		printk(KERN_CRIT 
++		       "mini_fo: meta_list_remove_r_entry: \
++                        invalid dentry passed.\n");
++		return -1;
++	}
++	return __meta_list_remove_r_entry(dentry->d_inode, name, len);
++}
++
++int __meta_list_remove_r_entry(inode_t *inode, const char *name, int len)
++{
++	struct list_head *tmp;
++        struct renamed_entry *ren_entry;
++        struct mini_fo_inode_info *inode_info;
++
++	if(!inode || !itopd(inode))
++		printk(KERN_CRIT 
++		       "mini_fo: __meta_list_remove_r_entry: \
++                        invalid inode passed.\n");
++	inode_info = itopd(inode);
++
++        if(inode_info->renamed_list_size < 0)
++                return -1;
++        if(inode_info->renamed_list_size == 0)
++                return 1;
++	
++	list_for_each(tmp, &inode_info->renamed_list) {
++		ren_entry = list_entry(tmp, struct renamed_entry, list);
++		if(ren_entry->new_len != len)
++			continue;
++		
++		if(!strncmp(ren_entry->new_name, name, len)) {
++			list_del(tmp);
++			kfree(ren_entry->new_name);
++			kfree(ren_entry->old_name);
++			kfree(ren_entry);
++			inode_info->renamed_list_size--;
++			return 0;
++		}
++	}
++	return 1;
++}
++
++
++/* append a single D entry to the meta file */
++int meta_write_d_entry(dentry_t *dentry, const char *name, int len) 
++{
++	dentry_t *meta_dentry = 0;
++        file_t *meta_file = 0;
++        mm_segment_t old_fs;
++
++        int bytes, err;
++        struct vfsmount *meta_mnt = 0;
++        char *buf;
++
++	err = 0;
++
++	if(itopd(dentry->d_inode)->deleted_list_size < 0) {
++		err = -1;
++		goto out;
++	}
++
++	if(dtopd(dentry)->state == UNMODIFIED) {
++                err = build_sto_structure(dentry->d_parent, dentry);
++                if(err) {
++                        printk(KERN_CRIT "mini_fo: meta_write_d_entry: \
++                                          build_sto_structure failed.\n");
++			goto out;
++                }
++        }
++	meta_dentry = lookup_one_len(META_FILENAME, 
++				     dtohd2(dentry), strlen (META_FILENAME));
++
++	/* We need to create a META-file */
++        if(!meta_dentry->d_inode) {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++		vfs_create(dtohd2(dentry)->d_inode,
++			   meta_dentry, 
++			   S_IRUSR | S_IWUSR,
++			   NULL);
++#else
++                vfs_create(dtohd2(dentry)->d_inode,
++			   meta_dentry, 
++			   S_IRUSR | S_IWUSR);
++#endif
++	}
++        /* open META-file for writing */
++        meta_file = dentry_open(meta_dentry, meta_mnt, 0x1);
++        if(!meta_file || IS_ERR(meta_file)) {
++                printk(KERN_CRIT "mini_fo: meta_write_d_entry: \
++                                  ERROR opening meta file.\n");
++                mntput(meta_mnt); /* $%& is this necessary? */
++                dput(meta_dentry);
++		err = -1;
++                goto out;
++        }
++
++        /* check if fs supports writing */
++        if(!meta_file->f_op->write) {
++                printk(KERN_CRIT "mini_fo: meta_write_d_entry: \
++                                  ERROR, fs does not support writing.\n");
++                goto out_err_close;
++        }
++
++	meta_file->f_pos = meta_dentry->d_inode->i_size; /* append */
++        old_fs = get_fs();
++        set_fs(KERNEL_DS);
++
++	/* size: len for name, 1 for \n and 2 for "D " */
++	buf = (char *) kmalloc(len+3, GFP_KERNEL);
++	if (!buf) {
++		printk(KERN_CRIT "mini_fo: meta_write_d_entry: \
++                                  out of mem.\n");
++		set_fs(old_fs);
++		err = -ENOMEM;
++		goto out_err_close;
++	}
++		      
++	buf[0] = 'D';
++	buf[1] = ' ';
++	strncpy(buf+2, name, len);
++	buf[len+2] = '\n';
++	bytes = meta_file->f_op->write(meta_file, buf, len+3, 
++				       &meta_file->f_pos);
++	if(bytes != len+3) {
++		printk(KERN_CRIT "mini_fo: meta_write_d_entry: \
++                                  ERROR writing.\n");
++		err = -1;
++	}
++	kfree(buf);
++	set_fs(old_fs);
++
++ out_err_close:
++	fput(meta_file);
++ out:
++	return err;
++}
++
++/* append a single R entry to the meta file */
++int meta_write_r_entry(dentry_t *dentry, 
++		       const char *old_name, int old_len, 
++		       const char *new_name, int new_len) 
++{
++	dentry_t *meta_dentry = 0;
++        file_t *meta_file = 0;
++        mm_segment_t old_fs;
++
++        int bytes, err, buf_len;
++	struct vfsmount *meta_mnt = 0;
++        char *buf;
++
++
++	err = 0;
++
++	if(itopd(dentry->d_inode)->renamed_list_size < 0) {
++		err = -1;
++		goto out;
++	}
++
++	/* build the storage structure? */
++	if(dtopd(dentry)->state == UNMODIFIED) {
++                err = build_sto_structure(dentry->d_parent, dentry);
++                if(err) {
++                        printk(KERN_CRIT "mini_fo: meta_write_r_entry: \
++                                          build_sto_structure failed.\n");
++			goto out;
++                }
++        }
++	meta_dentry = lookup_one_len(META_FILENAME, 
++				     dtohd2(dentry), 
++				     strlen (META_FILENAME));
++        if(!meta_dentry->d_inode) {
++                /* We need to create a META-file */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++                vfs_create(dtohd2(dentry)->d_inode, 
++			   meta_dentry, S_IRUSR | S_IWUSR, NULL);
++#else
++                vfs_create(dtohd2(dentry)->d_inode, 
++			   meta_dentry, S_IRUSR | S_IWUSR);
++#endif
++	}
++        /* open META-file for writing */
++        meta_file = dentry_open(meta_dentry, meta_mnt, 0x1);
++        if(!meta_file || IS_ERR(meta_file)) {
++                printk(KERN_CRIT "mini_fo: meta_write_r_entry: \
++                                  ERROR opening meta file.\n");
++                mntput(meta_mnt);
++                dput(meta_dentry);
++		err = -1;
++                goto out;
++        }
++
++        /* check if fs supports writing */
++        if(!meta_file->f_op->write) {
++                printk(KERN_CRIT "mini_fo: meta_write_r_entry: \
++                                  ERROR, fs does not support writing.\n");
++                goto out_err_close;
++        }
++
++	meta_file->f_pos = meta_dentry->d_inode->i_size; /* append */
++        old_fs = get_fs();
++        set_fs(KERNEL_DS);
++
++	/* size: 2 for "R ", old_len+new_len for names, 1 blank+1 \n */
++	buf_len = old_len + new_len + 4;
++	buf = (char *) kmalloc(buf_len, GFP_KERNEL);
++	if (!buf) {
++		printk(KERN_CRIT "mini_fo: meta_write_r_entry: out of mem.\n");
++		set_fs(old_fs);
++		err = -ENOMEM;
++		goto out_err_close;
++	}
++		      
++	buf[0] = 'R';
++	buf[1] = ' ';
++	strncpy(buf + 2, old_name, old_len);
++	buf[old_len + 2] = ' ';
++	strncpy(buf + old_len + 3, new_name, new_len);
++	buf[buf_len -1] = '\n';
++	bytes = meta_file->f_op->write(meta_file, buf, buf_len, &meta_file->f_pos);
++	if(bytes != buf_len) {
++		printk(KERN_CRIT "mini_fo: meta_write_r_entry: ERROR writing.\n");
++		err = -1;
++	}
++	
++	kfree(buf);
++	set_fs(old_fs);
++
++ out_err_close:
++	fput(meta_file);
++ out:
++	return err;
++}
++
++/* sync D list to disk, append data if app_flag is 1 */
++/* check the meta_mnt, which seems not to be used (properly)  */
++
++int meta_sync_d_list(dentry_t *dentry, int app_flag)
++{
++	dentry_t *meta_dentry;
++        file_t *meta_file;
++        mm_segment_t old_fs;
++	
++        int bytes, err;
++        struct vfsmount *meta_mnt;
++        char *buf;
++
++	struct list_head *tmp;
++        struct deleted_entry *del_entry;
++        struct mini_fo_inode_info *inode_info;
++
++	err = 0;
++	meta_file=0;
++	meta_mnt=0;
++	
++	if(!dentry || !dentry->d_inode) {
++		printk(KERN_CRIT "mini_fo: meta_sync_d_list: \
++                                  invalid dentry passed.\n");
++		err = -1;
++		goto out;
++	}
++	inode_info = itopd(dentry->d_inode);
++	
++        if(inode_info->deleted_list_size < 0) {
++		err = -1;
++		goto out;
++	}
++	
++	/* ok, there is something to sync */
++
++	/* build the storage structure? */
++        if(!dtohd2(dentry) && !itohi2(dentry->d_inode)) {
++                err = build_sto_structure(dentry->d_parent, dentry);
++                if(err) {
++                        printk(KERN_CRIT "mini_fo: meta_sync_d_list: \
++                                          build_sto_structure failed.\n");
++			goto out;
++                }
++        }
++	meta_dentry = lookup_one_len(META_FILENAME, 
++				     dtohd2(dentry), 
++				     strlen(META_FILENAME));
++        if(!meta_dentry->d_inode) {
++                /* We need to create a META-file */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++                vfs_create(dtohd2(dentry)->d_inode, 
++			   meta_dentry, S_IRUSR | S_IWUSR, NULL);
++#else
++                vfs_create(dtohd2(dentry)->d_inode, 
++			   meta_dentry, S_IRUSR | S_IWUSR);
++#endif
++		app_flag = 0;
++	}
++	/* need we truncate the meta file? */
++	if(!app_flag) {
++		struct iattr newattrs;
++                newattrs.ia_size = 0;
++                newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_lock(&meta_dentry->d_inode->i_mutex);
++#else
++                down(&meta_dentry->d_inode->i_sem);
++#endif
++                err = notify_change(meta_dentry, &newattrs);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_unlock(&meta_dentry->d_inode->i_mutex);
++#else
++                up(&meta_dentry->d_inode->i_sem);
++#endif
++
++                if(err || meta_dentry->d_inode->i_size != 0) {
++                        printk(KERN_CRIT "mini_fo: meta_sync_d_list: \
++                                          ERROR truncating meta file.\n");
++                        goto out_err_close;
++		}
++	}
++
++        /* open META-file for writing */
++        meta_file = dentry_open(meta_dentry, meta_mnt, 0x1);
++        if(!meta_file || IS_ERR(meta_file)) {
++                printk(KERN_CRIT "mini_fo: meta_sync_d_list: \
++                                  ERROR opening meta file.\n");
++		/* we don't mntget so we don't mntput (for now)
++		 * mntput(meta_mnt); 
++		 */
++		dput(meta_dentry);
++		err = -1;
++                goto out;
++        }
++
++        /* check if fs supports writing */
++        if(!meta_file->f_op->write) {
++                printk(KERN_CRIT "mini_fo: meta_sync_d_list: \
++                                  ERROR, fs does not support writing.\n");
++                goto out_err_close;
++        }
++	
++	meta_file->f_pos = meta_dentry->d_inode->i_size; /* append */
++        old_fs = get_fs();
++        set_fs(KERNEL_DS);
++
++	/* here we go... */
++        list_for_each(tmp, &inode_info->deleted_list) {
++		del_entry = list_entry(tmp, struct deleted_entry, list);
++		
++		/* size: len for name, 1 for \n and 2 for "D " */
++		buf = (char *) kmalloc(del_entry->len+3, GFP_KERNEL);
++		if (!buf) {
++			printk(KERN_CRIT "mini_fo: meta_sync_d_list: \
++                                          out of mem.\n");
++			set_fs(old_fs);
++			err = -ENOMEM;
++			goto out_err_close;
++		}
++		      
++		buf[0] = 'D';
++		buf[1] = ' ';
++		strncpy(buf+2, del_entry->name, del_entry->len);
++		buf[del_entry->len+2] = '\n';
++		bytes = meta_file->f_op->write(meta_file, buf, 
++					       del_entry->len+3, 
++					       &meta_file->f_pos);
++		if(bytes != del_entry->len+3) {
++			printk(KERN_CRIT "mini_fo: meta_sync_d_list: \
++                                          ERROR writing.\n");
++			err |= -1;
++		}
++		kfree(buf);
++	}
++	set_fs(old_fs);
++	
++ out_err_close:
++	fput(meta_file);
++ out:
++	return err;
++
++}
++
++int meta_sync_r_list(dentry_t *dentry, int app_flag)
++{
++	dentry_t *meta_dentry;
++        file_t *meta_file;
++        mm_segment_t old_fs;
++	
++        int bytes, err, buf_len;
++        struct vfsmount *meta_mnt;
++        char *buf;
++	
++	struct list_head *tmp;
++        struct renamed_entry *ren_entry;
++        struct mini_fo_inode_info *inode_info;
++	
++	err = 0;
++	meta_file=0;
++	meta_mnt=0;
++	
++	if(!dentry || !dentry->d_inode) {
++		printk(KERN_CRIT "mini_fo: meta_sync_r_list: \
++                                  invalid dentry passed.\n");
++		err = -1;
++		goto out;
++	}
++	inode_info = itopd(dentry->d_inode);
++	
++        if(inode_info->renamed_list_size < 0) {
++		err = -1;
++		goto out;
++	}
++	
++	/* ok, there is something to sync */
++
++	/* build the storage structure? */
++        if(!dtohd2(dentry) && !itohi2(dentry->d_inode)) {
++                err = build_sto_structure(dentry->d_parent, dentry);
++                if(err) {
++                        printk(KERN_CRIT "mini_fo: meta_sync_r_list: \
++                                          build_sto_structure failed.\n");
++			goto out;
++                }
++        }
++	meta_dentry = lookup_one_len(META_FILENAME, 
++				     dtohd2(dentry), 
++				     strlen(META_FILENAME));
++        if(!meta_dentry->d_inode) {
++                /* We need to create a META-file */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++                vfs_create(dtohd2(dentry)->d_inode, 
++			   meta_dentry, S_IRUSR | S_IWUSR, NULL);
++#else
++                vfs_create(dtohd2(dentry)->d_inode, 
++			   meta_dentry, S_IRUSR | S_IWUSR);
++#endif
++		app_flag = 0;
++	}
++	/* need we truncate the meta file? */
++	if(!app_flag) {
++		struct iattr newattrs;
++                newattrs.ia_size = 0;
++                newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_lock(&meta_dentry->d_inode->i_mutex);
++#else
++                down(&meta_dentry->d_inode->i_sem);
++#endif
++                err = notify_change(meta_dentry, &newattrs);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_unlock(&meta_dentry->d_inode->i_mutex);
++#else
++                up(&meta_dentry->d_inode->i_sem);
++#endif
++                if(err || meta_dentry->d_inode->i_size != 0) {
++                        printk(KERN_CRIT "mini_fo: meta_sync_r_list: \
++                                          ERROR truncating meta file.\n");
++                        goto out_err_close;
++		}
++	}
++
++        /* open META-file for writing */
++        meta_file = dentry_open(meta_dentry, meta_mnt, 0x1);
++        if(!meta_file || IS_ERR(meta_file)) {
++                printk(KERN_CRIT "mini_fo: meta_sync_r_list: \
++                                  ERROR opening meta file.\n");
++		/* we don't mntget so we don't mntput (for now)
++		 * mntput(meta_mnt); 
++		 */
++		dput(meta_dentry);
++		err = -1;
++                goto out;
++        }
++
++        /* check if fs supports writing */
++        if(!meta_file->f_op->write) {
++                printk(KERN_CRIT "mini_fo: meta_sync_r_list: \
++                                  ERROR, fs does not support writing.\n");
++                goto out_err_close;
++        }
++	
++	meta_file->f_pos = meta_dentry->d_inode->i_size; /* append */
++        old_fs = get_fs();
++        set_fs(KERNEL_DS);
++
++	/* here we go... */
++        list_for_each(tmp, &inode_info->renamed_list) {
++		ren_entry = list_entry(tmp, struct renamed_entry, list);
++		/* size: 
++		 * 2 for "R ", old_len+new_len for names, 1 blank+1 \n */
++		buf_len = ren_entry->old_len + ren_entry->new_len + 4;
++		buf = (char *) kmalloc(buf_len, GFP_KERNEL);
++		if (!buf) {
++			printk(KERN_CRIT "mini_fo: meta_sync_r_list: \
++                                          out of mem.\n");
++			set_fs(old_fs);
++			err = -ENOMEM;
++			goto out_err_close;
++		}
++		buf[0] = 'R';
++		buf[1] = ' ';
++		strncpy(buf + 2, ren_entry->old_name, ren_entry->old_len);
++		buf[ren_entry->old_len + 2] = ' ';
++		strncpy(buf + ren_entry->old_len + 3, 
++			ren_entry->new_name, ren_entry->new_len);
++		buf[buf_len - 1] = '\n';
++		bytes = meta_file->f_op->write(meta_file, buf, 
++					       buf_len, &meta_file->f_pos);
++		if(bytes != buf_len) {
++			printk(KERN_CRIT "mini_fo: meta_sync_r_list: \
++                                          ERROR writing.\n");
++			err |= -1;
++		}		
++		kfree(buf);
++	}
++	set_fs(old_fs);
++	
++ out_err_close:
++	fput(meta_file);
++ out:
++	return err;
++}
++
++int meta_check_d_entry(dentry_t *dentry, const char *name, int len) 
++{
++	if(!dentry || !dentry->d_inode)
++		printk(KERN_CRIT "mini_fo: meta_check_d_dentry: \
++                                  invalid dentry passed.\n");
++	return __meta_check_d_entry(dentry->d_inode, name, len);	
++}
++
++int __meta_check_d_entry(inode_t *inode, const char *name, int len) 
++{
++	struct list_head *tmp;
++        struct deleted_entry *del_entry;
++        struct mini_fo_inode_info *inode_info;
++
++	if(!inode || !itopd(inode))
++		printk(KERN_CRIT "mini_fo: __meta_check_d_dentry: \
++                                  invalid inode passed.\n");
++
++        inode_info = itopd(inode);
++	
++        if(inode_info->deleted_list_size <= 0)
++                return 0;
++
++        list_for_each(tmp, &inode_info->deleted_list) {
++		del_entry = list_entry(tmp, struct deleted_entry, list);
++		if(del_entry->len != len)
++			continue;
++		
++		if(!strncmp(del_entry->name, name, len))
++			return 1;
++	}
++	return 0;
++}
++
++/*
++ * check if a file has been renamed and return the path to the original
++ * base directory. No error return values are implemented yet, which is
++ * unfortunate. The returned string is null-terminated.
++ */
++char* meta_check_r_entry(dentry_t *dentry, const char *name, int len) 
++{
++	if(!dentry || !dentry->d_inode) {
++		printk(KERN_CRIT "mini_fo: meta_check_r_dentry: \
++                                  invalid dentry passed.\n");
++		return NULL;
++	}
++	return __meta_check_r_entry(dentry->d_inode, name, len);	
++}
++
++char* __meta_check_r_entry(inode_t *inode, const char *name, int len)
++{
++	struct list_head *tmp;
++        struct renamed_entry *ren_entry;
++        struct mini_fo_inode_info *inode_info;
++	char *old_path;
++	
++	if(!inode || !itopd(inode)) {
++		printk(KERN_CRIT "mini_fo: meta_check_r_dentry: \
++                                  invalid inode passed.\n");
++		return NULL;
++	}
++	inode_info = itopd(inode);
++	
++        if(inode_info->renamed_list_size <= 0)
++                return NULL;
++	
++        list_for_each(tmp, &inode_info->renamed_list) {
++		ren_entry = list_entry(tmp, struct renamed_entry, list);
++		if(ren_entry->new_len != len)
++			continue;
++		
++		if(!strncmp(ren_entry->new_name, name, len)) {
++			old_path = (char *)
++				kmalloc(ren_entry->old_len+1, GFP_KERNEL);
++			if(!old_path)
++				return NULL;
++			strncpy(old_path,
++				ren_entry->old_name,
++				ren_entry->old_len);
++			old_path[ren_entry->old_len]='\0';
++			return old_path;
++		}
++	}
++	return NULL;
++}
++
++/*
++ * This version only checks if entry exists and return:
++ *     1 if exists,
++ *     0 if not,
++ *    -1 if error.
++ */
++int meta_is_r_entry(dentry_t *dentry, const char *name, int len) 
++{
++	if(!dentry || !dentry->d_inode) {
++		printk(KERN_CRIT "mini_fo: meta_check_r_dentry [2]: \
++                                  invalid dentry passed.\n");
++		return -1;
++	}
++	return __meta_is_r_entry(dentry->d_inode, name, len);	
++}
++
++int __meta_is_r_entry(inode_t *inode, const char *name, int len)
++{
++	struct list_head *tmp;
++        struct renamed_entry *ren_entry;
++        struct mini_fo_inode_info *inode_info;
++	
++	if(!inode || !itopd(inode)) {
++		printk(KERN_CRIT "mini_fo: meta_check_r_dentry [2]: \
++                                  invalid inode passed.\n");
++		return -1;
++	}
++	inode_info = itopd(inode);
++	
++        if(inode_info->renamed_list_size <= 0)
++                return -1;
++	
++        list_for_each(tmp, &inode_info->renamed_list) {
++		ren_entry = list_entry(tmp, struct renamed_entry, list);
++		if(ren_entry->new_len != len)
++			continue;
++		
++		if(!strncmp(ren_entry->new_name, name, len)) 
++			return 1;
++	}
++	return 0;
++}
++
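As a point of reference for the routines above: the META file written and parsed by meta_build_lists(), meta_write_d_entry() and meta_write_r_entry() is a plain text file in which each newline-terminated line is either "D <name>" (a whiteout entry for a deleted file) or "R <old_path> <new_name>" (a renamed directory). The following stand-alone user-space sketch is not part of the patch; it only illustrates how that line format could be parsed outside the kernel (the default file name reuses the META_FILENAME constant, the line buffer size is an arbitrary assumption):

/* Hypothetical user-space parser for a mini_fo META file.
 * Mirrors the "D name" / "R old_path new_name" format handled by
 * meta_build_lists() above; not part of the kernel patch itself. */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	char line[4096];
	FILE *f = fopen(argc > 1 ? argv[1] : "META_dAfFgHE39ktF3HD2sr", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		line[strcspn(line, "\n")] = '\0';	/* strip the newline */
		if (line[0] == 'D' && line[1] == ' ') {
			printf("deleted:  %s\n", line + 2);
		} else if (line[0] == 'R' && line[1] == ' ') {
			char *sep = strchr(line + 2, ' ');
			if (sep) {
				*sep = '\0';
				printf("renamed:  %s -> %s\n", line + 2, sep + 1);
			}
		}
		/* unknown entry types are ignored, as in the kernel code */
	}
	fclose(f);
	return 0;
}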
+diff -urN linux-2.6.21.1.old/fs/mini_fo/mini_fo.h linux-2.6.21.1.dev/fs/mini_fo/mini_fo.h
+--- linux-2.6.21.1.old/fs/mini_fo/mini_fo.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/mini_fo.h	2007-05-26 21:01:26.167329264 +0200
+@@ -0,0 +1,510 @@
++/*
++ * Copyright (c) 1997-2003 Erez Zadok
++ * Copyright (c) 2001-2003 Stony Brook University
++ *
++ * For specific licensing information, see the COPYING file distributed with
++ * this package, or get one from ftp://ftp.filesystems.org/pub/fist/COPYING.
++ *
++ * This Copyright notice must be kept intact and distributed with all
++ * fistgen sources INCLUDING sources generated by fistgen.
++ */
++/*
++ * Copyright (C) 2004, 2005 Markus Klotzbuecher <mk@creamnet.de>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++/*
++ *  $Id$
++ */
++
++#ifndef __MINI_FO_H_
++#define __MINI_FO_H_
++
++#ifdef __KERNEL__
++
++/* META stuff */
++#define META_FILENAME "META_dAfFgHE39ktF3HD2sr"
++
++/* use xattrs? */
++#define XATTR
++
++/* File attributes that, when changed, result in a file being copied to storage */
++#define COPY_FLAGS ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_SIZE
++
++/*
++ * mini_fo filestates
++ */
++#define MODIFIED       1
++#define UNMODIFIED     2
++#define CREATED        3
++#define DEL_REWRITTEN  4
++#define DELETED        5
++#define NON_EXISTANT   6
++
++/* fist file systems superblock magic */
++# define MINI_FO_SUPER_MAGIC 0xf15f
++
++/*
++ * STRUCTURES:
++ */
++
++/* mini_fo inode data in memory */
++struct mini_fo_inode_info {
++	inode_t *wii_inode;
++	inode_t *wii_inode2; /* pointer to storage inode */
++
++	/* META-data lists */
++	/* deleted list, ex wol */
++	struct list_head deleted_list;
++	int deleted_list_size;
++
++	/* renamed list */
++	struct list_head renamed_list;
++	int renamed_list_size;
++
++	/* add other lists here ... */
++};
++
++/* mini_fo dentry data in memory */
++struct mini_fo_dentry_info {
++	dentry_t *wdi_dentry;
++	dentry_t *wdi_dentry2; /* pointer to  storage dentry */
++	unsigned int state;  /* state of the mini_fo dentry */
++};
++
++
++/* mini_fo super-block data in memory */
++struct mini_fo_sb_info {
++	super_block_t *wsi_sb, *wsi_sb2; /* mk: might point to the same sb */
++	struct vfsmount *hidden_mnt, *hidden_mnt2;
++	dentry_t *base_dir_dentry;
++	dentry_t *storage_dir_dentry;
++};
++
++/* readdir_data, readdir helper struct */
++struct readdir_data {
++	struct list_head ndl_list; /* linked list head ptr */
++	int ndl_size; /* list size */
++	int sto_done; /* flag to show that the storage dir entries have
++		       * all been read and that base entries now follow */
++};
++
++/* file private data. */
++struct mini_fo_file_info {
++	struct file *wfi_file;
++	struct file *wfi_file2; /* pointer to storage file */
++	struct readdir_data rd;
++};
++
++/* struct ndl_entry */
++struct ndl_entry {
++	struct list_head list;
++	char *name;
++	int len;
++};
++
++/********************************
++ *  META-data structures
++ ********************************/
++
++/* deleted entry */
++struct deleted_entry {
++	struct list_head list;
++	char *name;
++	int len;
++};
++
++/* renamed entry */
++struct renamed_entry {
++	struct list_head list;
++	char *old_name;     /* old directory with full path */
++	int old_len;        /* length of above string */
++	char *new_name;     /* new directory name */
++	int new_len;        /* length of above string */
++};
++
++/* attr_change entry */
++struct attr_change_entry {
++	struct list_head list;
++	char *name;
++	int len;
++};
++
++/* link entry */
++struct link_entry {
++	struct list_head list;
++	int links_moved;
++	int inum_base;
++	int inum_sto;
++	char *weird_name;
++	int weird_name_len;
++};
++
++
++/* Some other stuff required for mini_fo_filldir64, copied from
++ * fs/readdir.c
++ */
++
++#define ROUND_UP64(x) (((x)+sizeof(u64)-1) & ~(sizeof(u64)-1))
++#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
++
++
++struct linux_dirent64 {
++        u64             d_ino;
++        s64             d_off;
++        unsigned short  d_reclen;
++        unsigned char   d_type;
++        char            d_name[0];
++};
++
++
++struct getdents_callback64 {
++        struct linux_dirent64 * current_dir;
++        struct linux_dirent64 * previous;
++        int count;
++        int error;
++};
++
++struct linux_dirent {
++	unsigned long   d_ino;
++	unsigned long   d_off;
++	unsigned short  d_reclen;
++	char            d_name[1];
++};
++
++struct getdents_callback {
++	struct linux_dirent * current_dir;
++	struct linux_dirent * previous;
++	int count;
++	int error;
++};
++
++
++/*
++ * MACROS:
++ */
++
++/* file TO private_data */
++# define ftopd(file) ((struct mini_fo_file_info *)((file)->private_data))
++# define __ftopd(file) ((file)->private_data)
++/* file TO hidden_file */
++# define ftohf(file) ((ftopd(file))->wfi_file)
++# define ftohf2(file) ((ftopd(file))->wfi_file2) 
++
++/* inode TO private_data */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++# define itopd(ino) ((struct mini_fo_inode_info *)(ino)->i_private)
++# define __itopd(ino) ((ino)->i_private)
++#else
++# define itopd(ino) ((struct mini_fo_inode_info *)(ino)->u.generic_ip)
++# define __itopd(ino) ((ino)->u.generic_ip)
++#endif
++/* inode TO hidden_inode */
++# define itohi(ino) (itopd(ino)->wii_inode)
++# define itohi2(ino) (itopd(ino)->wii_inode2)
++
++/* superblock TO private_data */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++# define stopd(super) ((struct mini_fo_sb_info *)(super)->s_fs_info)
++# define __stopd(super) ((super)->s_fs_info)
++#else
++# define stopd(super) ((struct mini_fo_sb_info *)(super)->u.generic_sbp)
++# define __stopd(super) ((super)->u.generic_sbp)
++#endif
++
++/* unused? # define vfs2priv stopd */
++/* superblock TO hidden_superblock */
++
++# define stohs(super) (stopd(super)->wsi_sb)
++# define stohs2(super) (stopd(super)->wsi_sb2)
++
++/* dentry TO private_data */
++# define dtopd(dentry) ((struct mini_fo_dentry_info *)(dentry)->d_fsdata)
++# define __dtopd(dentry) ((dentry)->d_fsdata)
++/* dentry TO hidden_dentry */
++# define dtohd(dent) (dtopd(dent)->wdi_dentry)
++# define dtohd2(dent) (dtopd(dent)->wdi_dentry2)
++
++/* dentry to state */
++# define dtost(dent) (dtopd(dent)->state)
++# define sbt(sb) ((sb)->s_type->name)
++
++#define IS_WRITE_FLAG(flag) (flag & (O_RDWR | O_WRONLY | O_APPEND))
++#define IS_COPY_FLAG(flag) (flag & (COPY_FLAGS))
++
++/* macros to simplify non-SCA code */
++#  define MALLOC_PAGE_POINTERS(hidden_pages, num_hidden_pages)
++#  define MALLOC_PAGEDATA_POINTERS(hidden_pages_data, num_hidden_pages)
++#  define FREE_PAGE_POINTERS(hidden_pages, num)
++#  define FREE_PAGEDATA_POINTERS(hidden_pages_data, num)
++#  define FOR_EACH_PAGE
++#  define CURRENT_HIDDEN_PAGE hidden_page
++#  define CURRENT_HIDDEN_PAGEDATA hidden_page_data
++#  define CURRENT_HIDDEN_PAGEINDEX page->index
++
++/*
++ * EXTERNALS:
++ */
++extern struct file_operations mini_fo_main_fops;
++extern struct file_operations mini_fo_dir_fops;
++extern struct inode_operations mini_fo_main_iops;
++extern struct inode_operations mini_fo_dir_iops;
++extern struct inode_operations mini_fo_symlink_iops;
++extern struct super_operations mini_fo_sops;
++extern struct dentry_operations mini_fo_dops;
++extern struct vm_operations_struct mini_fo_shared_vmops;
++extern struct vm_operations_struct mini_fo_private_vmops;
++extern struct address_space_operations mini_fo_aops;
++
++#if 0 /* unused by mini_fo */
++extern int mini_fo_interpose(dentry_t *hidden_dentry, dentry_t *this_dentry, super_block_t *sb, int flag);
++#if defined(FIST_FILTER_DATA) || defined(FIST_FILTER_SCA)
++extern page_t *mini_fo_get1page(file_t *file, int index);
++extern int mini_fo_fill_zeros(file_t *file, page_t *page, unsigned from);
++# endif /* FIST_FILTER_DATA || FIST_FILTER_SCA */
++
++
++#  define mini_fo_hidden_dentry(d) __mini_fo_hidden_dentry(__FILE__,__FUNCTION__,__LINE__,(d))
++#  define mini_fo_hidden_sto_dentry(d) __mini_fo_hidden_sto_dentry(__FILE__,__FUNCTION__,__LINE__,(d))
++
++extern dentry_t *__mini_fo_hidden_dentry(char *file, char *func, int line, dentry_t *this_dentry);
++extern dentry_t *__mini_fo_hidden_sto_dentry(char *file, char *func, int line, dentry_t *this_dentry);
++
++extern int mini_fo_read_file(const char *filename, void *buf, int len);
++extern int mini_fo_write_file(const char *filename, void *buf, int len);
++extern dentry_t *fist_lookup(dentry_t *dir, const char *name, vnode_t **out, uid_t uid, gid_t gid);
++#endif /* unused by mini_fo */
++
++/* state transition functions */
++extern int nondir_unmod_to_mod(dentry_t *dentry, int cp_flag);
++extern int nondir_del_rew_to_del(dentry_t *dentry);
++extern int nondir_creat_to_del(dentry_t *dentry);
++extern int nondir_mod_to_del(dentry_t *dentry);
++extern int nondir_unmod_to_del(dentry_t *dentry);
++
++extern int dir_unmod_to_mod(dentry_t *dentry);
++
++/* rename specials */
++extern int rename_directory(inode_t *old_dir, dentry_t *old_dentry, inode_t *new_dir, dentry_t *new_dentry);
++extern int rename_nondir(inode_t *old_dir, dentry_t *old_dentry, inode_t *new_dir, dentry_t *new_dentry);
++
++/* misc stuff */
++extern int mini_fo_tri_interpose(dentry_t *hidden_dentry,
++				 dentry_t *hidden_sto_dentry,
++				 dentry_t *dentry, 
++				 super_block_t *sb, int flag);
++
++extern int mini_fo_cp_cont(dentry_t *tgt_dentry, struct vfsmount *tgt_mnt,
++			   dentry_t *src_dentry, struct vfsmount *src_mnt);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++extern int mini_fo_create(inode_t *dir, dentry_t *dentry, int mode, struct nameidata *nd);
++
++extern int create_sto_nod(dentry_t *dentry, int mode, dev_t dev);
++extern int create_sto_reg_file(dentry_t *dentry, int mode, struct nameidata *nd);
++#else
++extern int mini_fo_create(inode_t *dir, dentry_t *dentry, int mode);
++
++extern int create_sto_nod(dentry_t *dentry, int mode, int dev);
++extern int create_sto_reg_file(dentry_t *dentry, int mode);
++#endif
++
++extern int create_sto_dir(dentry_t *dentry, int mode);
++
++extern int exists_in_storage(dentry_t *dentry);
++extern int is_mini_fo_existant(dentry_t *dentry);
++extern int get_neg_sto_dentry(dentry_t *dentry);
++extern int build_sto_structure(dentry_t *dir, dentry_t *dentry);
++extern int get_mini_fo_bpath(dentry_t *dentry, char **bpath, int *bpath_len);
++extern dentry_t *bpath_walk(super_block_t *sb, char *bpath);
++extern int bpath_put(dentry_t *dentry);
++
++/* check_mini_fo types functions */
++extern int check_mini_fo_dentry(dentry_t *dentry);
++extern int check_mini_fo_file(file_t *file);
++extern int check_mini_fo_inode(inode_t *inode);
++
++/* General meta functions, can be called from outside of meta.c */
++extern int meta_build_lists(dentry_t *dentry);
++extern int meta_put_lists(dentry_t *dentry);
++extern int __meta_put_lists(inode_t *inode);
++
++extern int meta_add_d_entry(dentry_t *dentry, const char *name, int len);
++extern int meta_add_r_entry(dentry_t *dentry, 
++			    const char *old_name, int old_len, 
++			    const char *new_name, int new_len);
++
++extern int meta_remove_r_entry(dentry_t *dentry, const char *name, int len);
++
++extern int meta_check_d_entry(dentry_t *dentry, const char *name, int len);
++extern int __meta_check_d_entry(inode_t *inode, const char *name, int len);
++
++extern char* meta_check_r_entry(dentry_t *dentry, const char *name, int len);
++extern char* __meta_check_r_entry(inode_t *inode, const char *name, int len);
++extern int meta_is_r_entry(dentry_t *dentry, const char *name, int len);
++extern int __meta_is_r_entry(inode_t *inode, const char *name, int len);
++
++/* Specific meta functions, should be called only inside meta.c */
++extern int __meta_put_d_list(inode_t *inode);
++extern int __meta_put_r_list(inode_t *inode);
++
++extern int meta_list_add_d_entry(dentry_t *dentry, 
++				 const char *name, int len);
++extern int meta_list_add_r_entry(dentry_t *dentry, 
++				 const char *old_name, int old_len, 
++				 const char *new_name, int new_len);
++
++extern int meta_list_remove_r_entry(dentry_t *dentry, 
++				    const char *name, int len);
++
++extern int __meta_list_remove_r_entry(inode_t *inode, 
++				      const char *name, int len);
++
++extern int meta_write_d_entry(dentry_t *dentry, const char *name, int len);
++extern int meta_write_r_entry(dentry_t *dentry, 
++			      const char *old_name, int old_len, 
++			      const char *new_name, int new_len);
++
++extern int meta_sync_lists(dentry_t *dentry);
++extern int meta_sync_d_list(dentry_t *dentry, int app_flag);
++extern int meta_sync_r_list(dentry_t *dentry, int app_flag);
++
++/* ndl stuff */
++extern int ndl_add_entry(struct readdir_data *rd, const char *name, int len);
++extern void ndl_put_list(struct readdir_data *rd);
++extern int ndl_check_entry(struct readdir_data *rd, 
++			   const char *name, int len);
++
++
++# define copy_inode_size(dst, src) \
++    dst->i_size = src->i_size; \
++    dst->i_blocks = src->i_blocks;
++
++static inline void
++fist_copy_attr_atime(inode_t *dest, const inode_t *src)
++{
++	ASSERT(dest != NULL);
++	ASSERT(src != NULL);
++	dest->i_atime = src->i_atime;
++}
++static inline void
++fist_copy_attr_times(inode_t *dest, const inode_t *src)
++{
++	ASSERT(dest != NULL);
++	ASSERT(src != NULL);
++	dest->i_atime = src->i_atime;
++	dest->i_mtime = src->i_mtime;
++	dest->i_ctime = src->i_ctime;
++}
++static inline void
++fist_copy_attr_timesizes(inode_t *dest, const inode_t *src)
++{
++	ASSERT(dest != NULL);
++	ASSERT(src != NULL);
++	dest->i_atime = src->i_atime;
++	dest->i_mtime = src->i_mtime;
++	dest->i_ctime = src->i_ctime;
++	copy_inode_size(dest, src);
++}
++static inline void
++fist_copy_attr_all(inode_t *dest, const inode_t *src)
++{
++	ASSERT(dest != NULL);
++	ASSERT(src != NULL);
++	dest->i_mode = src->i_mode;
++	dest->i_nlink = src->i_nlink;
++	dest->i_uid = src->i_uid;
++	dest->i_gid = src->i_gid;
++	dest->i_rdev = src->i_rdev;
++	dest->i_atime = src->i_atime;
++	dest->i_mtime = src->i_mtime;
++	dest->i_ctime = src->i_ctime;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++	dest->i_blksize = src->i_blksize;
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,12)
++	dest->i_blkbits = src->i_blkbits;
++# endif /* linux 2.4.12 and newer */
++	copy_inode_size(dest, src);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++	dest->i_attr_flags = src->i_attr_flags;
++#else
++	dest->i_flags = src->i_flags;
++#endif
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++/* copied from linux/fs.h */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++static inline void double_lock(struct dentry *d1, struct dentry *d2)
++{
++	struct mutex *m1 = &d1->d_inode->i_mutex;
++	struct mutex *m2 = &d2->d_inode->i_mutex;
++	if (m1 != m2) {
++		if ((unsigned long) m1 < (unsigned long) m2) {
++			struct mutex *tmp = m2;
++			m2 = m1; m1 = tmp;
++		}
++		mutex_lock(m1);
++	}
++	mutex_lock(m2);
++}
++
++static inline void double_unlock(struct dentry *d1, struct dentry *d2)
++{
++	struct mutex *m1 = &d1->d_inode->i_mutex;
++	struct mutex *m2 = &d2->d_inode->i_mutex;
++	mutex_unlock(m1);
++	if (m1 != m2)
++		mutex_unlock(m2);
++	dput(d1);
++	dput(d2);
++}
++
++#else
++static inline void double_down(struct semaphore *s1, struct semaphore *s2)
++{
++        if (s1 != s2) {
++                if ((unsigned long) s1 < (unsigned long) s2) {
++                        struct semaphore *tmp = s2;
++                        s2 = s1; s1 = tmp;
++                }
++                down(s1);
++        }
++        down(s2);
++}
++
++static inline void double_up(struct semaphore *s1, struct semaphore *s2)
++{
++        up(s1);
++        if (s1 != s2)
++                up(s2);
++}
++
++static inline void double_lock(struct dentry *d1, struct dentry *d2)
++{
++        double_down(&d1->d_inode->i_sem, &d2->d_inode->i_sem);
++}
++
++static inline void double_unlock(struct dentry *d1, struct dentry *d2)
++{
++        double_up(&d1->d_inode->i_sem,&d2->d_inode->i_sem);
++        dput(d1);
++        dput(d2);
++}
++#endif   /* if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) */
++#endif  /* if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) */
++#endif /* __KERNEL__ */
++
++/*
++ * Definitions for user and kernel code
++ */
++
++/* ioctls */
++
++#endif	/* not __MINI_FO_H_ */
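The double_lock()/double_unlock() helpers defined above avoid ABBA deadlocks by always taking the two inode locks in a fixed (address) order. A minimal user-space analogue of the same idea, using POSIX mutexes instead of kernel primitives and not part of the patch, could look like the sketch below (compile with -pthread):

/* Hypothetical user-space illustration of address-ordered lock pairing,
 * mirroring double_lock()/double_unlock() above; not part of the patch. */
#include <pthread.h>
#include <stdio.h>

static void lock_pair(pthread_mutex_t *m1, pthread_mutex_t *m2)
{
	if (m1 == m2) {			/* same object: take the lock once */
		pthread_mutex_lock(m1);
		return;
	}
	if ((unsigned long) m1 > (unsigned long) m2) {
		pthread_mutex_t *tmp = m1; m1 = m2; m2 = tmp;
	}
	pthread_mutex_lock(m1);		/* always lower address first */
	pthread_mutex_lock(m2);
}

static void unlock_pair(pthread_mutex_t *m1, pthread_mutex_t *m2)
{
	pthread_mutex_unlock(m1);
	if (m1 != m2)
		pthread_mutex_unlock(m2);
}

int main(void)
{
	pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

	lock_pair(&a, &b);
	printf("both locks held\n");
	unlock_pair(&a, &b);
	return 0;
}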
+diff -urN linux-2.6.21.1.old/fs/mini_fo/mini_fo-merge linux-2.6.21.1.dev/fs/mini_fo/mini_fo-merge
+--- linux-2.6.21.1.old/fs/mini_fo/mini_fo-merge	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/mini_fo-merge	2007-05-26 21:01:26.167329264 +0200
+@@ -0,0 +1,180 @@
++#!/bin/bash
++#
++# Copyright (C) 2005 Markus Klotzbuecher <mk@creamnet.de>
++# This program is free software; you can redistribute it and/or
++# modify it under the terms of the GNU General Public License
++# as published by the Free Software Foundation; either version
++# 2 of the License, or (at your option) any later version.
++#
++
++BASE=
++STO=
++HELP=
++DRYRUN=
++VERBOSE=
++TMP="/tmp/"
++META_NAME="META_dAfFgHE39ktF3HD2sr"
++SKIP_DEL_LIST="skip-delete-list.mini_fo-merge"
++
++COMMAND=
++exec_command()
++{
++    if [ x$DRYRUN == "xset" ]; then
++	echo "  would run: $COMMAND"
++    else
++	if [ x$VERBOSE == "xset" ]; then
++	    echo "  running: $COMMAND"
++	fi
++	eval $COMMAND
++    fi
++}
++
++usage()
++{
++cat <<EOF
++
++USAGE: $0 -b <base dir> -s <storage dir>
++Version 0.1
++
++This script merges the contents of a mini_fo storage file system back
++to the base file system.
++
++!!! Warning: This will modify the base filesystem and can destroy data
++             if used wrongly.
++
++Options:
++     -b <base dir>
++          the directory of the base file system.
++
++     -s <storage dir>
++          the directory of the storage file system.
++
++     -d   dry run, will not change anything and print the commands that
++          would be executed.
++
++     -t   tmp dir for storing temporary file. default: $TMP
++
++     -v   show what operations are performed.
++
++     -h   displays this message.
++
++EOF
++}
++
++# parse parameters
++while getopts hdvt:b:s: OPTS
++  do
++  case $OPTS in
++      h)  HELP="set";;
++      d)  DRYRUN="set";;
++      v)  VERBOSE="set";;
++      b)  BASE="$OPTARG";;
++      s)  STO="$OPTARG";;
++      t)  TMP="$OPTARG";;
++      ?)  usage
++	  exit 1;;
++  esac
++done
++
++if [ "x$HELP" == "xset" ]; then
++    usage
++    exit -1
++fi
++
++if ! [ -d "$BASE" ] || ! [ -d "$STO" ]; then
++    echo -e "$0:\n Error, -s and/or -b argument missing. type $0 -h for help."
++    exit -1;
++fi
++
++# get full paths
++pushd $STO; STO=`pwd`; popd
++pushd $BASE; BASE=`pwd`; popd
++TMP=${TMP%/}
++
++
++cat<<EOF
++###############################################################################
++# mini_fo-merge
++#
++# base dir:       $BASE
++# storage dir:    $STO
++# meta filename:  $META_NAME
++# dry run:        $DRYRUN
++# verbose:        $VERBOSE     
++# tmp files:      $TMP
++###############################################################################
++
++EOF
++
++rm -f $TMP/$SKIP_DEL_LIST
++
++# first process all renamed dirs
++echo "Merging renamed directories..."
++pushd $STO &> /dev/null
++find . -name $META_NAME -type f -print0  | xargs -0 -e grep  -e '^R ' | tr -s ':R' ' ' | while read ENTRY; do 
++    echo "entry: $ENTRY"
++    META_FILE=`echo $ENTRY | cut -d ' ' -f 1`
++    OLD_B_DIR=`echo $ENTRY | cut -d ' ' -f 2 | sed -e 's/\///'`
++    NEW_NAME=`echo $ENTRY | cut -d ' ' -f 3`
++    NEW_B_DIR=`echo $META_FILE | sed -e "s/$META_NAME/$NEW_NAME/" | sed -e 's/^\.\///'`
++    echo "META_FILE: $META_FILE"
++    echo "OLD_B_DIR: $OLD_B_DIR"
++    echo "NEW_NAME: $NEW_NAME"
++    echo  "NEW_B_DIR: $NEW_B_DIR"
++
++    pushd $BASE &> /dev/null
++    # remove an existing dir in storage
++    COMMAND="rm -rf $NEW_B_DIR"; exec_command
++    COMMAND="cp -R $OLD_B_DIR $NEW_B_DIR"; exec_command
++    echo ""
++    popd &> /dev/null
++
++    # remember this dir to exclude it from deleting later
++    echo $NEW_B_DIR >> $TMP/$SKIP_DEL_LIST
++done
++
++# delete all whiteouted files from base
++echo -e "\nDeleting whiteout'ed files from base file system..."
++find . -name $META_NAME -type f -print0  | xargs -0 -e grep  -e '^D ' | sed -e 's/:D//' | while read ENTRY; do 
++    META_FILE=`echo $ENTRY | cut -d ' ' -f 1`
++    DEL_NAME=`echo $ENTRY | cut -d ' ' -f 2`
++    DEL_FILE=`echo $META_FILE | sed -e "s/$META_NAME/$DEL_NAME/" | sed -e 's/^\.\///'`
++    grep -x $DEL_FILE $TMP/$SKIP_DEL_LIST &> /dev/null
++    if [ $? -ne 0 ]; then
++	pushd $BASE &> /dev/null
++	COMMAND="rm -rf $DEL_FILE"; exec_command
++	popd &> /dev/null
++    else
++	echo "  excluding: $DEL_FILE as in skip-del-list."
++    fi
++done
++
++# create all dirs and update permissions
++echo -e "\nSetting up directory structures in base file system..."
++find . -type d | sed -e 's/^\.\///' | while read DIR; do
++    PERMS=`stat -c %a $DIR`
++    DIR_UID=`stat -c %u $DIR`
++    DIR_GID=`stat -c %g $DIR`
++    pushd $BASE &> /dev/null
++    if ! [ -d $DIR ]; then
++	COMMAND="mkdir -p $DIR"; exec_command
++    fi
++    COMMAND="chmod $PERMS $DIR"; exec_command
++    COMMAND="chown $DIR_UID:$DIR_GID $DIR"; exec_command
++    popd &> /dev/null
++done
++
++# merge all non-directory files
++echo -e "\nMerging all non-directory files...."
++for i in b c p f l s; do
++    find . -type $i | sed -e 's/^\.\///' | grep -v "$META_NAME" | while read FILE; do
++	pushd $BASE #&> /dev/null
++	COMMAND="cp -df $STO/$FILE $BASE/$FILE"; exec_command
++	popd &> /dev/null
++    done   
++done
++popd &> /dev/null
++
++#rm $TMP/$SKIP_DEL_LIST 
++
++echo "Done!"
+diff -urN linux-2.6.21.1.old/fs/mini_fo/mini_fo-overlay linux-2.6.21.1.dev/fs/mini_fo/mini_fo-overlay
+--- linux-2.6.21.1.old/fs/mini_fo/mini_fo-overlay	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/mini_fo-overlay	2007-05-26 21:01:26.167329264 +0200
+@@ -0,0 +1,130 @@
++#!/bin/bash
++#
++# Copyright (C) 2005 Markus Klotzbuecher <mk@creamnet.de>
++# This program is free software; you can redistribute it and/or
++# modify it under the terms of the GNU General Public License
++# as published by the Free Software Foundation; either version
++# 2 of the License, or (at your option) any later version.
++#
++
++HELP=
++SUFF=
++MNTP=
++MNT_DIR="/mnt"
++STO=
++STO_DIR="/tmp"
++BASE=
++
++usage() 
++{
++cat <<EOF
++
++Usage: $0 [-s suffix] [-d sto_dir_dir] [-m mount point] base_dir
++Version 0.1
++
++This script overlays the given base directory using the mini_fo file
++system. If only the base directory base_dir is given, $0
++will use a storage directory called "sto-<base_dir_name>" in $STO_DIR
++and a mount point called "mini_fo-<base_dir_name>" in $MNT_DIR.
++
++Options:
++     -s <suffix>
++          add given suffix to storage directory and the mount
++          point. This is useful for overlaying one base directory
++          several times and avoiding conflicts with storage directory
++          names and mount points.
++
++     -d <sto_dir_dir>
++          change the directory in which the storage directory will be
++          created (default is currently "$STO_DIR").
++
++     -m <mount point>
++          use an alternative directory to create the mini_fo
++          mountpoint (default is currently "$MNT_DIR").
++
++     -h   displays this message.
++
++EOF
++exit 1;
++}
++
++while getopts hm:s:d: OPTS
++  do
++  case $OPTS in
++      s)  SUFF="$OPTARG";;
++      d)  STO_DIR="$OPTARG";;
++      m)  MNT_DIR="$OPTARG";;
++      h)  HELP="set";;
++      ?)  usage
++	  exit 1;;
++  esac
++done
++shift $(($OPTIND - 1))
++
++BASE="$1"
++
++if [ "x$HELP" == "xset" ]; then
++    usage
++    exit -1
++fi
++
++# fix suffix 
++if [ "x$SUFF" != "x" ]; then
++    SUFF="-$SUFF"
++fi
++
++# kill trailing slashes
++MNT_DIR=${MNT_DIR%/}
++STO_DIR=${STO_DIR%/}
++BASE=${BASE%/}
++
++
++if ! [ -d "$BASE" ]; then
++    echo "invalid base dir $BASE, run $0 -h for help."
++    exit -1
++fi
++
++# check opts
++if ! [ -d "$MNT_DIR" ]; then
++    echo "invalid mount dir $MNT_DIR, run $0 -h for help."
++    exit -1
++fi
++
++if ! [ -d "$STO_DIR" ]; then
++    echo "invalid sto_dir_dir $STO_DIR, run $0 -h for help."
++    exit -1
++fi
++
++MNTP="$MNT_DIR/mini_fo-`basename $BASE`$SUFF"
++STO="$STO_DIR/sto-`basename $BASE`$SUFF"
++
++# create the mount point if it doesn't exist
++mkdir -p $MNTP
++if [ $? -ne 0 ]; then
++    echo "Error, failed to create mount point $MNTP"
++fi
++
++mkdir -p $STO
++if [ $? -ne 0 ]; then
++    echo "Error, failed to create storage dir $STO"
++fi
++
++# check if fs is already mounted
++mount | grep mini_fo | grep $MNTP &> /dev/null
++if [ $? -eq 0 ]; then
++    echo "Error, existing mini_fo mount at $MNTP."
++    exit -1
++fi
++
++mount | grep mini_fo | grep $STO &> /dev/null
++if [ $? -eq 0 ]; then
++    echo "Error, $STO seems to be used already."
++    exit -1
++fi
++
++# mount 
++mount -t mini_fo -o base=$BASE,sto=$STO $BASE $MNTP
++
++if [ $? -ne 0 ]; then
++    echo "Error, mounting failed, maybe no permission to mount?"
++fi
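The mount issued at the end of this script can also be performed programmatically. The sketch below is only an illustration, not part of the patch: it uses the mount(2) system call with the same "base=...,sto=..." option string, the directory paths are placeholders, and it assumes root privileges plus a loaded mini_fo module.

/* Hypothetical direct equivalent of the script's final mount command;
 * paths are placeholders, run as root with mini_fo available. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* mount -t mini_fo -o base=/base,sto=/sto /base /mnt/mini_fo-base */
	if (mount("/base", "/mnt/mini_fo-base", "mini_fo", 0,
		  "base=/base,sto=/sto") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}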
+diff -urN linux-2.6.21.1.old/fs/mini_fo/mmap.c linux-2.6.21.1.dev/fs/mini_fo/mmap.c
+--- linux-2.6.21.1.old/fs/mini_fo/mmap.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/mmap.c	2007-05-26 21:01:26.168329112 +0200
+@@ -0,0 +1,637 @@
++/*
++ * Copyright (c) 1997-2003 Erez Zadok
++ * Copyright (c) 2001-2003 Stony Brook University
++ *
++ * For specific licensing information, see the COPYING file distributed with
++ * this package, or get one from ftp://ftp.filesystems.org/pub/fist/COPYING.
++ *
++ * This Copyright notice must be kept intact and distributed with all
++ * fistgen sources INCLUDING sources generated by fistgen.
++ */
++/*
++ * Copyright (C) 2004, 2005 Markus Klotzbuecher <mk@creamnet.de>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++/*
++ *  $Id$
++ */
++
++#ifdef HAVE_CONFIG_H
++# include <config.h>
++#endif /* HAVE_CONFIG_H */
++
++#include "fist.h"
++#include "mini_fo.h"
++
++
++#ifdef FIST_COUNT_WRITES
++/* for counting writes in the middle vs. regular writes */
++unsigned long count_writes = 0, count_writes_middle = 0;
++#endif /* FIST_COUNT_WRITES */
++
++/* forward declaration of commit write and prepare write */
++STATIC int mini_fo_commit_write(file_t *file, page_t *page, unsigned from, unsigned to);
++STATIC int mini_fo_prepare_write(file_t *file, page_t *page, unsigned from, unsigned to);
++
++
++/*
++ * Function for handling creation of holes when lseek-ing past the
++ * end of the file and then writing some data.
++ */
++int
++mini_fo_fill_zeros(file_t* file, page_t *page, unsigned from)
++{
++	int err = 0;
++	dentry_t *dentry = file->f_dentry;
++	inode_t *inode = dentry->d_inode;
++	page_t *tmp_page;
++	int index;
++
++	print_entry_location();
++
++	for (index = inode->i_size >> PAGE_CACHE_SHIFT; index < page->index; index++) {
++		tmp_page = mini_fo_get1page(file, index);
++		if (IS_ERR(tmp_page)) {
++			err = PTR_ERR(tmp_page);
++			goto out;
++		}
++
++		/*
++		 * zero out rest of the contents of the page between the appropriate
++		 * offsets.
++		 */
++		memset((char*)page_address(tmp_page) + (inode->i_size & ~PAGE_CACHE_MASK), 0, PAGE_CACHE_SIZE - (inode->i_size & ~PAGE_CACHE_MASK));
++
++		if (! (err = mini_fo_prepare_write(file, tmp_page, 0, PAGE_CACHE_SIZE)))
++			err = mini_fo_commit_write(file, tmp_page, 0, PAGE_CACHE_SIZE);
++
++		page_cache_release(tmp_page);
++		if (err < 0)
++			goto out;
++		if (current->need_resched)
++			schedule();
++	}
++
++	/* zero out appropriate parts of last page */
++
++	/*
++	 * if the encoding type is block, then adjust the 'from' (where the
++	 * zeroing will start) offset appropriately
++	 */
++	from = from & (~(FIST_ENCODING_BLOCKSIZE - 1));
++
++	if ((from - (inode->i_size & ~PAGE_CACHE_MASK)) > 0) {
++
++		memset((char*)page_address(page) + (inode->i_size & ~PAGE_CACHE_MASK), 0, from - (inode->i_size & ~PAGE_CACHE_MASK));
++		if (! (err = mini_fo_prepare_write(file, page, 0, PAGE_CACHE_SIZE)))
++			err = mini_fo_commit_write(file, page, 0, PAGE_CACHE_SIZE);
++
++		if (err < 0)
++			goto out;
++		if (current->need_resched)
++			schedule();
++	}
++
++ out:
++	print_exit_status(err);
++	return err;
++}
++
++
++
++STATIC int
++mini_fo_writepage(page_t *page)
++{
++	int err = -EIO;
++	inode_t *inode;
++	inode_t *hidden_inode;
++	page_t *hidden_page;
++	char *kaddr, *hidden_kaddr;
++
++	print_entry_location();
++
++	inode = page->mapping->host;
++	hidden_inode = itohi(inode);
++
++	/*
++	 * writepage is called when shared mmap'ed files need to write
++	 * their pages, while prepare/commit_write are called from the
++	 * non-paged write() interface.  (However, in 2.3 the two interfaces
++	 * share the same cache, while in 2.2 they didn't.)
++	 *
++	 * So we pretty much have to duplicate much of what commit_write does.
++	 */
++
++	/* find lower page (returns a locked page) */
++	hidden_page = grab_cache_page(hidden_inode->i_mapping, page->index);
++	if (!hidden_page)
++		goto out;
++
++	/* get page address, and encode it */
++	kaddr = (char *) kmap(page);
++	hidden_kaddr = (char*) kmap(hidden_page);
++	mini_fo_encode_block(kaddr, hidden_kaddr, PAGE_CACHE_SIZE, inode, inode->i_sb, page->index);
++	/* if encode_block could fail, then return error */
++	kunmap(page);
++	kunmap(hidden_page);
++
++	/* call lower writepage (expects locked page) */
++	err = hidden_inode->i_mapping->a_ops->writepage(hidden_page);
++
++	/*
++	 * update mtime and ctime of lower level file system
++	 * mini_fo's mtime and ctime are updated by generic_file_write
++	 */
++	hidden_inode->i_mtime = hidden_inode->i_ctime = CURRENT_TIME;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,1)
++	UnlockPage(hidden_page);	/* b/c grab_cache_page locked it */
++# endif /* kernel older than 2.4.1 */
++	page_cache_release(hidden_page); /* b/c grab_cache_page increased refcnt */
++
++	if (err)
++		ClearPageUptodate(page);
++	else
++		SetPageUptodate(page);
++ out:
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,1)
++	UnlockPage(page);
++# endif /* kernel 2.4.1 and newer */
++	print_exit_status(err);
++	return err;
++}
++
++
++/*
++ * get one page from cache or lower f/s, return error otherwise.
++ * returns unlocked, up-to-date page (if ok), with increased refcnt.
++ */
++page_t *
++mini_fo_get1page(file_t *file, int index)
++{
++	page_t *page;
++	dentry_t *dentry;
++	inode_t *inode;
++	struct address_space *mapping;
++	int err;
++
++	print_entry_location();
++
++	dentry = file->f_dentry; /* CPW: Moved below print_entry_location */
++	inode = dentry->d_inode;
++	mapping = inode->i_mapping;
++
++	fist_dprint(8, "%s: read page index %d pid %d\n", __FUNCTION__, index, current->pid);
++	if (index < 0) {
++		printk("%s BUG: index=%d\n", __FUNCTION__, index);
++		page = ERR_PTR(-EIO);
++		goto out;
++	}
++	page = read_cache_page(mapping,
++			       index,
++			       (filler_t *) mapping->a_ops->readpage,
++			       (void *) file);
++	if (IS_ERR(page))
++		goto out;
++	wait_on_page(page);
++	if (!Page_Uptodate(page)) {
++		lock_page(page);
++		err = mapping->a_ops->readpage(file, page);
++		if (err) {
++			page = ERR_PTR(err);
++			goto out;
++		}
++		wait_on_page(page);
++		if (!Page_Uptodate(page)) {
++			page = ERR_PTR(-EIO);
++			goto out;
++		}
++	}
++
++ out:
++	print_exit_pointer(page);
++	return page;
++}
++
++
++/*
++ * get one page from cache or lower f/s, return error otherwise.
++ * similar to get1page, but doesn't guarantee that it will return
++ * an unlocked page.
++ */
++page_t *
++mini_fo_get1page_cached(file_t *file, int index)
++{
++	page_t *page;
++	dentry_t *dentry;
++	inode_t *inode;
++	struct address_space *mapping;
++	int err;
++
++	print_entry_location();
++
++	dentry = file->f_dentry; /* CPW: Moved below print_entry_location */
++	inode = dentry->d_inode;
++	mapping = inode->i_mapping;
++
++	fist_dprint(8, "%s: read page index %d pid %d\n", __FUNCTION__, index, current->pid);
++	if (index < 0) {
++		printk("%s BUG: index=%d\n", __FUNCTION__, index);
++		page = ERR_PTR(-EIO);
++		goto out;
++	}
++	page = read_cache_page(mapping,
++			       index,
++			       (filler_t *) mapping->a_ops->readpage,
++			       (void *) file);
++	if (IS_ERR(page))
++		goto out;
++
++ out:
++	print_exit_pointer(page);
++	return page;
++}
++
++
++/*
++ * readpage is called from generic_page_read and the fault handler.
++ * If your file system uses generic_page_read for the read op, it
++ * must implement readpage.
++ *
++ * Readpage expects a locked page, and must unlock it.
++ */
++STATIC int
++mini_fo_do_readpage(file_t *file, page_t *page)
++{
++	int err = -EIO;
++	dentry_t *dentry;
++	file_t *hidden_file = NULL;
++	dentry_t *hidden_dentry;
++	inode_t *inode;
++	inode_t *hidden_inode;
++	char *page_data;
++	page_t *hidden_page;
++	char *hidden_page_data;
++	int real_size;
++
++	print_entry_location();
++
++	dentry = file->f_dentry; /* CPW: Moved below print_entry_location */
++	if (ftopd(file) != NULL)
++		hidden_file = ftohf(file);
++	hidden_dentry = dtohd(dentry);
++	inode = dentry->d_inode;
++	hidden_inode = itohi(inode);
++
++	fist_dprint(7, "%s: requesting page %d from file %s\n", __FUNCTION__, page->index, dentry->d_name.name);
++
++	MALLOC_PAGE_POINTERS(hidden_pages, num_hidden_pages);
++	MALLOC_PAGEDATA_POINTERS(hidden_pages_data, num_hidden_pages);
++	FOR_EACH_PAGE
++		CURRENT_HIDDEN_PAGE = NULL;
++
++	/* find lower page (returns a locked page) */
++	FOR_EACH_PAGE {
++		fist_dprint(8, "%s: Current page index = %d\n", __FUNCTION__, CURRENT_HIDDEN_PAGEINDEX);
++		CURRENT_HIDDEN_PAGE = read_cache_page(hidden_inode->i_mapping,
++						      CURRENT_HIDDEN_PAGEINDEX,
++						      (filler_t *) hidden_inode->i_mapping->a_ops->readpage,
++						      (void *) hidden_file);
++		if (IS_ERR(CURRENT_HIDDEN_PAGE)) {
++			err = PTR_ERR(CURRENT_HIDDEN_PAGE);
++			CURRENT_HIDDEN_PAGE = NULL;
++			goto out_release;
++		}
++	}
++
++	/*
++	 * wait for the page data to show up
++	 * (signaled by readpage as unlocking the page)
++	 */
++	FOR_EACH_PAGE {
++		wait_on_page(CURRENT_HIDDEN_PAGE);
++		if (!Page_Uptodate(CURRENT_HIDDEN_PAGE)) {
++			/*
++			 * call readpage() again if we returned from wait_on_page with a
++			 * page that's not up-to-date; that can happen when a partial
++			 * page has a few buffers which are ok, but not the whole
++			 * page.
++			 */
++			lock_page(CURRENT_HIDDEN_PAGE);
++			err = hidden_inode->i_mapping->a_ops->readpage(hidden_file,
++								       CURRENT_HIDDEN_PAGE);
++			if (err) {
++				CURRENT_HIDDEN_PAGE = NULL;
++				goto out_release;
++			}
++			wait_on_page(CURRENT_HIDDEN_PAGE);
++			if (!Page_Uptodate(CURRENT_HIDDEN_PAGE)) {
++				err = -EIO;
++				goto out_release;
++			}
++		}
++	}
++
++	/* map pages, get their addresses */
++	page_data = (char *) kmap(page);
++	FOR_EACH_PAGE
++		CURRENT_HIDDEN_PAGEDATA = (char *) kmap(CURRENT_HIDDEN_PAGE);
++
++	/* if decode_block could fail, then return error */
++	err = 0;
++	real_size = hidden_inode->i_size - (page->index << PAGE_CACHE_SHIFT);
++	if (real_size <= 0)
++		memset(page_data, 0, PAGE_CACHE_SIZE);
++	else if (real_size < PAGE_CACHE_SIZE) {
++		mini_fo_decode_block(hidden_page_data, page_data, real_size, inode, inode->i_sb, page->index);
++		memset(page_data + real_size, 0, PAGE_CACHE_SIZE - real_size);
++	} else
++		mini_fo_decode_block(hidden_page_data, page_data, PAGE_CACHE_SIZE, inode, inode->i_sb, page->index);
++
++	FOR_EACH_PAGE
++		kunmap(CURRENT_HIDDEN_PAGE);
++	kunmap(page);
++
++ out_release:
++	FOR_EACH_PAGE
++		if (CURRENT_HIDDEN_PAGE)
++			page_cache_release(CURRENT_HIDDEN_PAGE); /* undo read_cache_page */
++
++	FREE_PAGE_POINTERS(hidden_pages, num_hidden_pages);
++	FREE_PAGEDATA_POINTERS(hidden_pages_data, num_hidden_pages);
++
++ out:
++	if (err == 0)
++		SetPageUptodate(page);
++	else
++		ClearPageUptodate(page);
++
++	print_exit_status(err);
++	return err;
++}
++
++
++STATIC int
++mini_fo_readpage(file_t *file, page_t *page)
++{
++	int err;
++	print_entry_location();
++
++	err = mini_fo_do_readpage(file, page);
++
++	/*
++	 * we have to unlock our page, b/c we _might_ have gotten a locked page.
++	 * but we no longer have to wakeup on our page here, b/c UnlockPage does
++	 * it
++	 */
++	UnlockPage(page);
++
++	print_exit_status(err);
++	return err;
++}
++
++
++STATIC int
++mini_fo_prepare_write(file_t *file, page_t *page, unsigned from, unsigned to)
++{
++	int err = 0;
++
++	print_entry_location();
++
++	/*
++	 * we call kmap(page) only here, and do the kunmap
++	 * and the actual downcalls, including unlockpage and uncache
++	 * in commit_write.
++	 */
++	kmap(page);
++
++	/* fast path for whole page writes */
++	if (from == 0 && to == PAGE_CACHE_SIZE)
++		goto out;
++	/* read the page to "revalidate" our data */
++	/* call the helper function which doesn't unlock the page */
++	if (!Page_Uptodate(page))
++		err = mini_fo_do_readpage(file, page);
++
++ out:
++	print_exit_status(err);
++	return err;
++}
++
++
++
++STATIC int
++mini_fo_commit_write(file_t *file, page_t *page, unsigned from, unsigned to)
++{
++	int err = -ENOMEM;
++	inode_t *inode;
++	inode_t *hidden_inode;
++	page_t *hidden_page;
++	file_t *hidden_file = NULL;
++	loff_t pos;
++	unsigned bytes = to - from;
++	unsigned hidden_from, hidden_to, hidden_bytes;
++
++	print_entry_location();
++
++	inode = page->mapping->host; /* CPW: Moved below print_entry_location */
++	hidden_inode = itohi(inode);
++
++	ASSERT(file != NULL);
++	/*
++	 * here we have a kmapped page, with data from the user copied
++	 * into it.  we need to encode_block it, and then call the lower
++	 * commit_write.  We also need to simulate same behavior of
++	 * generic_file_write, and call prepare_write on the lower f/s first.
++	 */
++#ifdef FIST_COUNT_WRITES
++	count_writes++;
++# endif /* FIST_COUNT_WRITES */
++
++	/* this is append and/or extend -- we can't have holes so fill them in */
++	if (page->index > (hidden_inode->i_size >> PAGE_CACHE_SHIFT)) {
++		page_t *tmp_page;
++		int index;
++		for (index = hidden_inode->i_size >> PAGE_CACHE_SHIFT; index < page->index; index++) {
++			tmp_page = mini_fo_get1page(file, index);
++			if (IS_ERR(tmp_page)) {
++				err = PTR_ERR(tmp_page);
++				goto out;
++			}
++			/* zero out the contents of the page at the appropriate offsets */
++			memset((char*)page_address(tmp_page) + (inode->i_size & ~PAGE_CACHE_MASK), 0, PAGE_CACHE_SIZE - (inode->i_size & ~PAGE_CACHE_MASK));
++			if (!(err = mini_fo_prepare_write(file, tmp_page, 0, PAGE_CACHE_SIZE)))
++				err = mini_fo_commit_write(file, tmp_page, 0, PAGE_CACHE_SIZE);
++			page_cache_release(tmp_page);
++			if (err < 0)
++				goto out;
++			if (current->need_resched)
++				schedule();
++		}
++	}
++
++	if (ftopd(file) != NULL)
++		hidden_file = ftohf(file);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_lock(&hidden_inode->i_mutex);
++#else
++	down(&hidden_inode->i_sem);
++#endif
++	/* find lower page (returns a locked page) */
++	hidden_page = grab_cache_page(hidden_inode->i_mapping, page->index);
++	if (!hidden_page)
++		goto out;
++
++#if FIST_ENCODING_BLOCKSIZE > 1
++#  error encoding_blocksize greater than 1 is not yet supported
++# endif /* FIST_ENCODING_BLOCKSIZE > 1 */
++
++	hidden_from = from & (~(FIST_ENCODING_BLOCKSIZE - 1));
++	hidden_to = ((to + FIST_ENCODING_BLOCKSIZE - 1) & (~(FIST_ENCODING_BLOCKSIZE - 1)));
++	if ((page->index << PAGE_CACHE_SHIFT) + to > hidden_inode->i_size) {
++
++		/*
++		 * if this call to commit_write had introduced holes and the code
++		 * for handling holes was invoked, then the beginning of this page
++		 * must be zeroed out
++		 * zero out bytes from 'size_of_file%pagesize' to 'from'.
++		 */
++		if ((hidden_from - (inode->i_size & ~PAGE_CACHE_MASK)) > 0)
++			memset((char*)page_address(page) + (inode->i_size & ~PAGE_CACHE_MASK), 0, hidden_from - (inode->i_size & ~PAGE_CACHE_MASK));
++
++	}
++	hidden_bytes = hidden_to - hidden_from;
++
++	/* call lower prepare_write */
++	err = -EINVAL;
++	if (hidden_inode->i_mapping &&
++	    hidden_inode->i_mapping->a_ops &&
++	    hidden_inode->i_mapping->a_ops->prepare_write)
++		err = hidden_inode->i_mapping->a_ops->prepare_write(hidden_file,
++								    hidden_page,
++								    hidden_from,
++								    hidden_to);
++	if (err)
++		/* don't leave locked pages behind, esp. on an ENOSPC */
++		goto out_unlock;
++
++	fist_dprint(8, "%s: encoding %d bytes\n", __FUNCTION__, hidden_bytes);
++	mini_fo_encode_block((char *) page_address(page) + hidden_from, (char*) page_address(hidden_page) + hidden_from, hidden_bytes, inode, inode->i_sb, page->index);
++	/* if encode_block could fail, then goto unlock and return error */
++
++	/* call lower commit_write */
++	err = hidden_inode->i_mapping->a_ops->commit_write(hidden_file,
++							   hidden_page,
++							   hidden_from,
++							   hidden_to);
++
++	if (err < 0)
++		goto out_unlock;
++
++	err = bytes;	/* convert error to no. of bytes */
++
++	inode->i_blocks = hidden_inode->i_blocks;
++	/* we may have to update i_size */
++	pos = (page->index << PAGE_CACHE_SHIFT) + to;
++	if (pos > inode->i_size)
++		inode->i_size = pos;
++
++	/*
++	 * update mtime and ctime of lower level file system
++	 * mini_fo's mtime and ctime are updated by generic_file_write
++	 */
++	hidden_inode->i_mtime = hidden_inode->i_ctime = CURRENT_TIME;
++
++	mark_inode_dirty_sync(inode);
++
++ out_unlock:
++	UnlockPage(hidden_page);
++	page_cache_release(hidden_page);
++	kunmap(page);		/* kmap was done in prepare_write */
++ out:
++	/* we must set our page as up-to-date */
++	if (err < 0)
++		ClearPageUptodate(page);
++	else
++		SetPageUptodate(page);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_unlock(&hidden_inode->i_mutex);
++#else
++	up(&hidden_inode->i_sem);
++#endif
++	print_exit_status(err);
++	return err;			/* assume all is ok */
++}
++
++
++STATIC int
++mini_fo_bmap(struct address_space *mapping, long block)
++{
++	int err = 0;
++	inode_t *inode;
++	inode_t *hidden_inode;
++
++	print_entry_location();
++
++	inode = (inode_t *) mapping->host;
++	hidden_inode = itohi(inode);
++
++	if (hidden_inode->i_mapping->a_ops->bmap)
++		err = hidden_inode->i_mapping->a_ops->bmap(hidden_inode->i_mapping, block);
++	print_exit_location();
++	return err;
++}
++
++
++/*
++ * This function is copied verbatim from mm/filemap.c.
++ * XXX: It should be simply moved to some header file instead -- bug Al about it!
++ */
++static inline int sync_page(struct page *page)
++{
++	struct address_space *mapping = page->mapping;
++
++	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
++		return mapping->a_ops->sync_page(page);
++	return 0;
++}
++
++
++/*
++ * XXX: we may not need this function if not FIST_FILTER_DATA.
++ * FIXME: for FIST_FILTER_SCA, get all lower pages and sync them each.
++ */
++STATIC int
++mini_fo_sync_page(page_t *page)
++{
++	int err = 0;
++	inode_t *inode;
++	inode_t *hidden_inode;
++	page_t *hidden_page;
++
++	print_entry_location();
++
++	inode = page->mapping->host; /* CPW: Moved below print_entry_location */
++	hidden_inode = itohi(inode);
++
++	/* find lower page (returns a locked page) */
++	hidden_page = grab_cache_page(hidden_inode->i_mapping, page->index);
++	if (!hidden_page)
++		goto out;
++
++	err = sync_page(hidden_page);
++
++	UnlockPage(hidden_page);	/* b/c grab_cache_page locked it */
++	page_cache_release(hidden_page); /* b/c grab_cache_page increased refcnt */
++
++ out:
++	print_exit_status(err);
++	return err;
++}
+diff -urN linux-2.6.21.1.old/fs/mini_fo/README linux-2.6.21.1.dev/fs/mini_fo/README
+--- linux-2.6.21.1.old/fs/mini_fo/README	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/README	2007-05-26 21:01:26.168329112 +0200
+@@ -0,0 +1,163 @@
++README for the mini_fo overlay file system
++=========================================
++
++
++WHAT IS MINI_FO?
++----------------
++
++mini_fo is a virtual kernel file system that can make read-only
++file systems writable. This is done by redirecting modifying operations
++to a writable location called "storage directory", and leaving the
++original data in the "base directory" untouched. When reading, the
++file system merges the modified and original data so that only the
++newest versions will appear. This occurs transparently to the user,
++who can access the data like on any other read-write file system.
++
++Base and storage directories may be located on the same or on
++different partitions and may be of different file system types. While
++the storage directory obviously needs to be writable, the base may or
++may not be writable, which doesn't matter, as it will not be modified
++anyway.
++
++
++WHAT IS IT GOOD FOR?
++--------------------
++
++The primary purpose of the mini_fo file system is to allow easy
++software updates to embedded systems, which often store their root
++file system in a read-only flash file system. There are many more
++uses, however, for example sandboxing, or allowing live CDs to
++permanently store information.
++
++
++BUILDING
++--------
++This should be simple. Adjust the Makefile to point to the correct
++kernel headers you want to build the module for. Then:
++
++    # make
++
++should build "mini_fo.o" for a 2.4 kernel or "mini_fo.ko" for a 2.6
++kernel.
++
++If you are building the module for your current kernel, you can install
++the module (as root):
++
++    # make install
++
++or uninstall with
++
++    # make uninstall
++
++
++USING THE FILE SYSTEM
++--------------------
++
++The general mount syntax is:
++
++   mount -t mini_fo -o base=<base directory>,sto=<storage directory>\
++                            <base directory> <mount point>
++
++Example:
++
++You have mounted a cdrom to /mnt/cdrom and want to modify some files
++on it:
++
++load the module (as root)
++    
++    # insmod mini_fo.o for a 2.4 kernel or
++ 
++    # insmod mini_fo.ko for a 2.6 kernel
++
++
++create a storage dir in tmp and a mountpoint for mini_fo:
++
++    # mkdir /tmp/sto
++    # mkdir /mnt/mini_fo
++
++and mount the mini_fo file system:
++
++    # mount -t mini_fo -o base=/mnt/cdrom,sto=/tmp/sto /mnt/cdrom /mnt/mini_fo
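++
++To verify that the overlay is in place (the mini_fo-overlay helper script
++performs the same check), you can inspect the mount table:
++
++    # mount | grep mini_fo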
++
++
++Now the data stored on the CD can be accessed via the mini_fo
++mountpoint just like any read-write file system: files can be modified
++and deleted, new ones can be created, and so on. When done, unmount the
++file system:
++
++    # umount /mnt/mini_fo
++
++Note that if the file system is mounted again using the same storage
++directory, it will of course appear in the modified state again. If
++you remount it using a new, empty storage directory, it will be
++unmodified. Therefore, by executing:
++
++    # cd /tmp/sto
++    # rm -rf *
++
++you can nuke all the changes you made to the original file system. But
++remember: NEVER do this while the mini_fo file system is mounted!
++
++
++Alternatively you can use the mini_fo-overlay bash script, that
++simplifies managing mini_fo mounts. See TOOLS Section.
++
++
++TOOLS
++-----
++
++mini_fo-merge (experimental):
++
++This is a bash script that will merge changes contained in the storage
++directory back to the base directory. This allows mini_fo to function
++as a cache file system by overlaying a slow (network, ...) file system
++and using a fast one (ramdisk, ...) as storage. When done, changes can be
++merged back to the (slow) base with mini_fo-merge. See "mini_fo-merge
++-h" for details.
++
++It can be useful for merging changes back after a successful test
++(patches, software updates, ...).
++
++
++mini_fo-overlay:
++
++This bash script simplifies managing one or more mini_fo mounts. For
++overlaying a directory called "basedir1", you can just call:
++
++    # mini_fo-overlay basedir1
++
++This will mount mini_fo with "basedir1" as base, "/tmp/sto-basedir1/"
++as storage to "/mnt/mini_fo-basedir1/". It has more options though,
++type "mini_fo-overlay -h" for details.
++
++
++DOCUMENTATION, REPORTING BUGS, GETTING HELP
++-------------------------------------------
++
++Please visit the mini_fo project page at:
++
++http://www.denx.de/twiki/bin/view/Know/MiniFOHome
++
++
++WARNINGS
++--------
++
++Never modify the base or the storage directories while the mini_fo
++file system is mounted, or you might crash your system. Simply accessing
++and reading should not cause any trouble.
++
++Exporting a mini_fo mount point via NFS has not been tested, and may
++or may not work.
++
++Check the RELEASE_NOTES for details on bugs and features.
++
++
++
++Copyright (C) 2004, 2005 Markus Klotzbuecher <mk@creamnet.de>
++
++This program is free software; you can redistribute it and/or
++modify it under the terms of the GNU General Public License
++as published by the Free Software Foundation; either version
++2 of the License, or (at your option) any later version.
++
++
+diff -urN linux-2.6.21.1.old/fs/mini_fo/RELEASE_NOTES linux-2.6.21.1.dev/fs/mini_fo/RELEASE_NOTES
+--- linux-2.6.21.1.old/fs/mini_fo/RELEASE_NOTES	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/RELEASE_NOTES	2007-05-26 21:01:26.168329112 +0200
+@@ -0,0 +1,111 @@
++Release:	mini_fo-0.6.1 (v0-6-1)
++Date:		21.09.2005
++
++
++Changes:
++--------
++v0-6-1:
++
++- bugfixes (see ChangeLog)
++
++- two helper scripts "mini_fo-merge" and "mini_fo-overlay" (see
++  README for details).
++
++v0-6-0:
++
++- Support for 2.4 and 2.6 (see Makefile)
++
++- Partial hard link support (creating works as expected, but already
++  existing links in the base file system will be treated as if they
++  were individual files).
++
++- Various bugfixes and cleanups.
++
++
++v0-6-0-pre1:
++
++- This is mini_fo-0-6-0-pre1! This release is a complete rewrite of
++  many vital mini_fo parts such as the old whiteout list code which
++  has been replaced by the new META subsystem.
++
++- Lightweight directory renaming implemented. This means that if a
++  directory is renamed via the mini_fo filesystem, this will no longer
++  result in a complete copy in storage; instead only one empty
++  directory will be created. All base files contained in the original
++  directory stay there until modified.
++
++- Special files (creating, renaming, deleting etc.) now working.
++
++- Many bugfixes and cleanup, mini_fo is now a lot more stable.
++
++
++v0-5-10:
++
++- Final release of the 0-5-* versions. Next will be a complete rewrite
++  of many features. This release contains several bugfixes related to
++  directory renaming.
++
++
++v0-5-10-pre6:
++
++- Lots of cleanup and several bugfixes related to directory deleting
++
++- Directory renaming suddenly works, which is most likely due to the
++  fact that "mv" is smart: if the classic rename doesn't work, it
++  will assume that source and target file are on different fs and will
++  copy the directory and try to remove the source directory. Before
++  directory removal was implemented, it would fail to do this and
++  roll back.
++  So, directory renaming works for now, but it doesn't yet do what you
++  would expect from an overlay fs, so use with care.
++
++
++v0-5-10-pre5:
++
++- implemented directory deleting 
++- made parsing of mount options more stable
++- New format of mount options! (See README)
++- I can't reproduce the unknown panic with 2.4.25 anymore, so I'll
++  happily assume it never existed!
++
++
++Implemented features:
++---------------------
++
++- creating hard links (see BUGS on already existing hard links)	
++- lightweight directory renaming
++- renaming device files, pipes, sockets, etc.	
++- creating, renaming, deleting of special files 
++- deleting directories
++- general directory reading (simple "ls")
++- creating files in existing directories
++- creating directories
++- renaming files
++- reading and writing files (involves opening)
++- appending to files (creates copy in storage)
++- deleting files
++- llseek works too, which allows editors to work
++- persistency (a deleted file stays deleted over remounts)
++- use of symbolic links
++- creating device files
++
++
++Not (yet) implemented features:
++-------------------------------
++
++- full hard link support.
++
++
++
++BUGS:
++-----
++
++Hard links in the base file system will be treated as individual
++files, not as links to one inode.
++
++The main problem with hard links isn't allowing their creation, but
++their pure existence. If you modify a base hard link, the changes made
++will only show up on this link; the other links will remain in the
++original state. I hope to fix this someday. Please note that this does
++not affect the special hard links '.' and '..', which are handled
++separately by the lower fs.
+diff -urN linux-2.6.21.1.old/fs/mini_fo/state.c linux-2.6.21.1.dev/fs/mini_fo/state.c
+--- linux-2.6.21.1.old/fs/mini_fo/state.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/state.c	2007-05-26 21:01:26.169328960 +0200
+@@ -0,0 +1,620 @@
++/*
++ * Copyright (C) 2005 Markus Klotzbuecher <mk@creamnet.de>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifdef HAVE_CONFIG_H
++# include <config.h>
++#endif /* HAVE_CONFIG_H */
++
++#include "fist.h"
++#include "mini_fo.h"
++
++
++/* create the storage file, setup new states */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++int create_sto_reg_file(dentry_t *dentry, int mode, struct nameidata *nd)
++#else
++int create_sto_reg_file(dentry_t *dentry, int mode)
++#endif
++{
++	int err = 0;
++	inode_t *dir;
++	dentry_t *hidden_sto_dentry;
++	dentry_t *hidden_sto_dir_dentry;
++
++	if(exists_in_storage(dentry)) {
++		printk(KERN_CRIT "mini_fo: create_sto_file: wrong type or state.\n");
++		err = -EINVAL;
++		goto out;
++	}
++	err = get_neg_sto_dentry(dentry);
++
++	if (err) {
++		printk(KERN_CRIT "mini_fo: create_sto_file: ERROR getting neg. sto dentry.\n");
++		goto out;
++	}
++	
++	dir = dentry->d_parent->d_inode;
++	hidden_sto_dentry = dtohd2(dentry);
++
++	/* lock parent */
++	hidden_sto_dir_dentry = dget(hidden_sto_dentry->d_parent);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_lock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++        down(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++
++	err = PTR_ERR(hidden_sto_dir_dentry);
++        if (IS_ERR(hidden_sto_dir_dentry))
++                goto out;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++	err = vfs_create(hidden_sto_dir_dentry->d_inode,
++			 hidden_sto_dentry,
++			 mode, nd);
++#else
++	err = vfs_create(hidden_sto_dir_dentry->d_inode,
++			 hidden_sto_dentry,
++			 mode);
++#endif
++        if(err) {
++		printk(KERN_CRIT "mini_fo: create_sto_file: ERROR creating sto file.\n");
++                goto out_lock;
++	}
++
++	if(!dtohd2(dentry)->d_inode) {
++		printk(KERN_CRIT "mini_fo: create_sto_file: ERROR creating sto file [2].\n");
++                err = -EINVAL;
++                goto out_lock;
++        }
++
++        /* interpose the new inode */
++        if(dtost(dentry) == DELETED) {
++                dtost(dentry) = DEL_REWRITTEN;
++                err = mini_fo_tri_interpose(NULL, hidden_sto_dentry, dentry, dir->i_sb, 0);
++                if(err)
++                        goto out_lock;
++        }
++        else if(dtost(dentry) == NON_EXISTANT) {
++                dtost(dentry) = CREATED;
++                err = mini_fo_tri_interpose(dtohd(dentry), hidden_sto_dentry, dentry, dir->i_sb, 0);
++                if(err)
++                        goto out_lock;
++        }
++        else if(dtost(dentry) == UNMODIFIED) {
++                dtost(dentry) = MODIFIED;
++                /* interpose on new inode */
++                if(itohi2(dentry->d_inode) != NULL) {
++                        printk(KERN_CRIT "mini_fo: create_sto_file: invalid inode detected.\n");
++                        err = -EINVAL;
++                        goto out_lock;
++                }
++                itohi2(dentry->d_inode) = igrab(dtohd2(dentry)->d_inode);
++	}
++	fist_copy_attr_timesizes(dentry->d_parent->d_inode, 
++				 hidden_sto_dir_dentry->d_inode);
++
++ out_lock:
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_unlock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++	up(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++        dput(hidden_sto_dir_dentry);
++ out:
++	return err;
++}
++
++/* create the sto dir, setup states */
++int create_sto_dir(dentry_t *dentry, int mode)
++{
++	int err = 0;
++	inode_t *dir;
++	dentry_t *hidden_sto_dentry;
++        dentry_t *hidden_sto_dir_dentry;
++
++	/* had to take the "!S_ISDIR(mode))" check out, because it failed */
++	if(exists_in_storage(dentry)) {
++                printk(KERN_CRIT "mini_fo: create_sto_dir: wrong type or state.\\
++n");
++                err = -EINVAL;
++                goto out;
++        }
++	
++	err = get_neg_sto_dentry(dentry);
++	if(err) {
++		err = -EINVAL;
++		goto out;
++	}
++
++	dir = dentry->d_parent->d_inode;
++	hidden_sto_dentry = dtohd2(dentry);
++
++	/* was: hidden_sto_dir_dentry = lock_parent(hidden_sto_dentry); */
++	hidden_sto_dir_dentry = dget(hidden_sto_dentry->d_parent);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_lock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++	down(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++
++	err = PTR_ERR(hidden_sto_dir_dentry);
++	if (IS_ERR(hidden_sto_dir_dentry))
++		goto out;
++	
++	err = vfs_mkdir(hidden_sto_dir_dentry->d_inode,
++			hidden_sto_dentry,
++			mode);
++	if(err) {
++		printk(KERN_CRIT "mini_fo: create_sto_dir: ERROR creating sto dir.\n");
++		goto out_lock;
++	}
++
++	if(!dtohd2(dentry)->d_inode) {
++		printk(KERN_CRIT "mini_fo: create_sto_dir: ERROR creating sto dir [2].\n");
++		err = -EINVAL;
++		goto out_lock;
++	}
++
++	/* interpose the new inode */
++	if(dtost(dentry) == DELETED) {
++		dtost(dentry) = DEL_REWRITTEN;
++		err = mini_fo_tri_interpose(NULL, hidden_sto_dentry, dentry, dir->i_sb, 0);
++		if(err)
++			goto out_lock;
++	}
++	else if(dtopd(dentry)->state == NON_EXISTANT) {
++		dtopd(dentry)->state = CREATED;
++		err = mini_fo_tri_interpose(dtohd(dentry), hidden_sto_dentry, dentry, dir->i_sb, 0);
++		if(err)
++			goto out_lock;
++	}
++	else if(dtopd(dentry)->state == UNMODIFIED) {
++		dtopd(dentry)->state = MODIFIED;
++		/* interpose on new inode */
++		if(itohi2(dentry->d_inode) != NULL) {
++			printk(KERN_CRIT "mini_fo:  create_sto_dir: ERROR, invalid inode detected.\n");
++			err = -EINVAL;
++			goto out_lock;
++		}
++		itohi2(dentry->d_inode) = igrab(dtohd2(dentry)->d_inode);
++	}
++
++	fist_copy_attr_timesizes(dir, hidden_sto_dir_dentry->d_inode);
++
++	/* initialize the wol list */
++	itopd(dentry->d_inode)->deleted_list_size = -1;
++	itopd(dentry->d_inode)->renamed_list_size = -1;
++	meta_build_lists(dentry);
++
++
++ out_lock:
++	/* was: unlock_dir(hidden_sto_dir_dentry); */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_unlock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++	up(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++	dput(hidden_sto_dir_dentry);
++ out:
++	return err;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++int create_sto_nod(dentry_t *dentry, int mode, dev_t dev) 
++#else
++int create_sto_nod(dentry_t *dentry, int mode, int dev) 
++#endif
++{
++	int err = 0;
++	inode_t *dir;
++	dentry_t *hidden_sto_dentry;
++	dentry_t *hidden_sto_dir_dentry;
++
++	if(exists_in_storage(dentry)) {
++		err = -EEXIST;
++		goto out;
++	}
++	err = get_neg_sto_dentry(dentry);
++
++	if (err) {
++                printk(KERN_CRIT "mini_fo: create_sto_nod: ERROR getting neg. sto dentry.\n");
++                goto out;
++        }	
++
++	dir = dentry->d_parent->d_inode;
++	hidden_sto_dentry = dtohd2(dentry);
++	
++	/* lock parent */
++	hidden_sto_dir_dentry = dget(hidden_sto_dentry->d_parent);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_lock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++	down(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++	
++	err = PTR_ERR(hidden_sto_dir_dentry);
++	if (IS_ERR(hidden_sto_dir_dentry))
++		goto out;
++
++	err = vfs_mknod(hidden_sto_dir_dentry->d_inode, hidden_sto_dentry, mode, dev);
++	if(err)
++		goto out_lock;
++
++	if(!dtohd2(dentry)->d_inode) {
++		printk(KERN_CRIT "mini_fo: create_sto_nod: creating storage inode failed [1].\n");
++		err = -EINVAL; /* return something indicating failure */
++		goto out_lock;
++	}
++
++	/* interpose the new inode */
++	if(dtost(dentry) == DELETED) {
++		dtost(dentry) = DEL_REWRITTEN;
++		err = mini_fo_tri_interpose(NULL, hidden_sto_dentry, dentry, dir->i_sb, 0);
++		if(err)
++			goto out_lock;
++	}
++	else if(dtost(dentry) == NON_EXISTANT) {
++		dtost(dentry) = CREATED;
++		err = mini_fo_tri_interpose(dtohd(dentry), hidden_sto_dentry, dentry, dir->i_sb, 0);
++		if(err)
++			goto out_lock;
++	}
++	else if(dtost(dentry) == UNMODIFIED) {
++		dtost(dentry) = MODIFIED;
++		/* interpose on new inode */
++		if(itohi2(dentry->d_inode) != NULL) {
++			printk(KERN_CRIT "mini_fo: create_sto_nod: error, invalid inode detected.\n");
++			err = -EINVAL;
++			goto out_lock;
++		}
++		itohi2(dentry->d_inode) = igrab(dtohd2(dentry)->d_inode);
++	}
++
++	fist_copy_attr_timesizes(dir, hidden_sto_dir_dentry->d_inode);
++
++ out_lock:
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_unlock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++	up(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++	dput(hidden_sto_dir_dentry);
++ out:
++	return err;
++}
++
++
++/* unimplemented (and possibly not useful):
++
++   nondir-del_to_del_rew
++   nondir-non_exist_to_creat
++
++   dir-unmod_to_del
++   dir-mod_to_del
++   dir-creat_to_del
++   dir-del_rew_to_del
++   dir-del_to_del_rew
++   dir-non_exist_to_creat
++*/
++
++
++/* bring a file of any type from state UNMODIFIED to MODIFIED */
++int nondir_unmod_to_mod(dentry_t *dentry, int cp_flag) 
++{
++	int err = 0;
++	struct vfsmount *tgt_mnt;
++	struct vfsmount *src_mnt;
++	dentry_t *tgt_dentry;
++	dentry_t *src_dentry;
++	dentry_t *hidden_sto_dentry;
++	dentry_t *hidden_sto_dir_dentry;
++
++	check_mini_fo_dentry(dentry);
++
++	if((dtost(dentry) != UNMODIFIED) ||
++	   S_ISDIR(dentry->d_inode->i_mode)) {
++		printk(KERN_CRIT "mini_fo: nondir_unmod_to_mod: \
++                                  wrong type or state.\n");
++		err = -1;
++		goto out;
++	}
++	err = get_neg_sto_dentry(dentry);
++
++	if (err) {
++		printk(KERN_CRIT "mini_fo: nondir_unmod_to_mod: \
++                                  ERROR getting neg. sto dentry.\n");
++		goto out;
++	}
++	
++	/* create sto file */
++	hidden_sto_dentry = dtohd2(dentry);
++
++	/* lock parent */
++	hidden_sto_dir_dentry = dget(hidden_sto_dentry->d_parent);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_lock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++        down(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++
++	err = PTR_ERR(hidden_sto_dir_dentry);
++        if (IS_ERR(hidden_sto_dir_dentry))
++                goto out;
++
++	/* handle different types of nondirs */
++	if(S_ISCHR(dentry->d_inode->i_mode) ||
++	   S_ISBLK(dentry->d_inode->i_mode)) {
++		err = vfs_mknod(hidden_sto_dir_dentry->d_inode,
++				hidden_sto_dentry,
++				dtohd(dentry)->d_inode->i_mode,
++				dtohd(dentry)->d_inode->i_rdev);
++	}
++	
++	else if(S_ISREG(dentry->d_inode->i_mode)) {
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++		err = vfs_create(hidden_sto_dir_dentry->d_inode,
++				 hidden_sto_dentry,
++				 dtohd(dentry)->d_inode->i_mode, NULL);
++#else
++		err = vfs_create(hidden_sto_dir_dentry->d_inode,
++				 hidden_sto_dentry,
++				 dtohd(dentry)->d_inode->i_mode);
++#endif
++	}
++        if(err) {
++		printk(KERN_CRIT "mini_fo: nondir_unmod_to_mod: \
++                                  ERROR creating sto file.\n");
++                goto out_lock;
++	}
++
++	/* interpose on new inode */
++	if(itohi2(dentry->d_inode) != NULL) {
++		printk(KERN_CRIT "mini_fo: nondir_unmod_to_mod: \
++                                  ERROR, invalid inode detected.\n");
++		err = -EINVAL;
++		goto out_lock;
++	}
++
++	itohi2(dentry->d_inode) = igrab(dtohd2(dentry)->d_inode);
++        
++        fist_copy_attr_timesizes(dentry->d_parent->d_inode, 
++				 hidden_sto_dir_dentry->d_inode);
++	dtost(dentry) = MODIFIED;
++
++	/* copy contents if regular file and cp_flag = 1 */
++	if((cp_flag == 1) && S_ISREG(dentry->d_inode->i_mode)) {
++
++		/* unlock first */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++		mutex_unlock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++		up(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++
++		dput(hidden_sto_dir_dentry);
++
++		tgt_dentry = dtohd2(dentry);
++		tgt_mnt = stopd(dentry->d_inode->i_sb)->hidden_mnt2;
++		src_dentry = dtohd(dentry);
++		src_mnt = stopd(dentry->d_inode->i_sb)->hidden_mnt;
++		
++		err = mini_fo_cp_cont(tgt_dentry, tgt_mnt, 
++				      src_dentry, src_mnt);
++		if(err) {
++			printk(KERN_CRIT "mini_fo: nondir_unmod_to_mod: \
++                                          ERROR copying contents.\n");
++		}
++		goto out;	
++	}
++
++ out_lock:
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_unlock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++	up(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++        dput(hidden_sto_dir_dentry);
++ out:
++	return err;
++}
++
++/* this function is currently identical to nondir_creat_to_del */
++int nondir_del_rew_to_del(dentry_t *dentry)
++{
++	return nondir_creat_to_del(dentry);
++}
++
++int nondir_creat_to_del(dentry_t *dentry) 
++{
++	int err = 0;
++
++	inode_t *hidden_sto_dir_inode;
++	dentry_t *hidden_sto_dir_dentry;
++	dentry_t *hidden_sto_dentry;
++	
++	check_mini_fo_dentry(dentry);
++
++	/* for now this function serves for both state DEL_REWRITTEN and 
++	 * CREATED */
++	if(!(dtost(dentry) == CREATED || (dtost(dentry) == DEL_REWRITTEN)) ||
++	   S_ISDIR(dentry->d_inode->i_mode)) {
++		printk(KERN_CRIT "mini_fo: nondir_mod_to_del/del_rew_to_del: \
++                                  wrong type or state.\n");
++		err = -1;
++		goto out;
++	}
++	
++	hidden_sto_dir_inode = itohi2(dentry->d_parent->d_inode);
++	hidden_sto_dentry = dtohd2(dentry);
++	
++	/* was: hidden_sto_dir_dentry = lock_parent(hidden_sto_dentry);*/
++	hidden_sto_dir_dentry = dget(hidden_sto_dentry->d_parent);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_lock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++	down(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++	
++	/* avoid destroying the hidden inode if the file is in use */
++	dget(hidden_sto_dentry);
++	err = vfs_unlink(hidden_sto_dir_inode, hidden_sto_dentry);
++	dput(hidden_sto_dentry);
++	if(!err)
++		d_delete(hidden_sto_dentry);
++	
++	/* propagate number of hard-links */
++	dentry->d_inode->i_nlink = itohi2(dentry->d_inode)->i_nlink;
++	
++	dtost(dentry) = NON_EXISTANT;
++	
++	/* was: unlock_dir(hidden_sto_dir_dentry); */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_unlock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++	up(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++	dput(hidden_sto_dir_dentry);
++	
++ out:
++	return err;
++}
++
++int nondir_mod_to_del(dentry_t *dentry)
++{
++	int err;
++	dentry_t *hidden_sto_dentry;
++	inode_t *hidden_sto_dir_inode;
++	dentry_t *hidden_sto_dir_dentry;
++	
++	check_mini_fo_dentry(dentry);
++
++	if(dtost(dentry) != MODIFIED ||
++	   S_ISDIR(dentry->d_inode->i_mode)) {
++		printk(KERN_CRIT "mini_fo: nondir_mod_to_del: \
++                                  wrong type or state.\n");
++		err = -1;
++		goto out;
++	}
++
++	hidden_sto_dir_inode = itohi2(dentry->d_parent->d_inode);
++	hidden_sto_dentry = dtohd2(dentry);
++	
++	/* was hidden_sto_dir_dentry = lock_parent(hidden_sto_dentry); */
++	hidden_sto_dir_dentry = dget(hidden_sto_dentry->d_parent);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_lock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++	down(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++	
++	/* avoid destroying the hidden inode if the file is in use */
++	dget(hidden_sto_dentry);
++	err = vfs_unlink(hidden_sto_dir_inode, hidden_sto_dentry);
++	dput(hidden_sto_dentry);
++	if(!err)
++		d_delete(hidden_sto_dentry);
++	
++	/* propagate number of hard-links */
++	dentry->d_inode->i_nlink = itohi2(dentry->d_inode)->i_nlink;
++	
++	/* dput base dentry, this will release the inode and free the
++	 * dentry, as we will never need it again. */
++	dput(dtohd(dentry));
++	dtohd(dentry) = NULL;
++	dtost(dentry) = DELETED;
++
++	/* add deleted file to META-file */
++	meta_add_d_entry(dentry->d_parent, 
++			 dentry->d_name.name, 
++			 dentry->d_name.len);
++	
++	/* was: unlock_dir(hidden_sto_dir_dentry); */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++	mutex_unlock(&hidden_sto_dir_dentry->d_inode->i_mutex);
++#else
++	up(&hidden_sto_dir_dentry->d_inode->i_sem);
++#endif
++	dput(hidden_sto_dir_dentry);
++
++ out:
++	return err;
++}
++
++int nondir_unmod_to_del(dentry_t *dentry)
++{
++	int err = 0;
++
++	check_mini_fo_dentry(dentry);
++
++	if(dtost(dentry) != UNMODIFIED ||
++	   S_ISDIR(dentry->d_inode->i_mode)) {
++		printk(KERN_CRIT "mini_fo: nondir_unmod_to_del: \
++                                  wrong type or state.\n");
++		err = -1;
++		goto out;
++	}
++	
++	 /* next we have to get a negative dentry for the storage file */
++	err = get_neg_sto_dentry(dentry);
++
++	if(err)
++		goto out;		
++
++	/* add deleted file to META lists */
++	err = meta_add_d_entry(dentry->d_parent, 
++			       dentry->d_name.name, 
++			       dentry->d_name.len);
++
++	if(err)
++		goto out;
++	
++	/* dput base dentry, this will release the inode and free the
++	 * dentry, as we will never need it again. */
++	dput(dtohd(dentry));
++	dtohd(dentry) = NULL;
++	dtost(dentry) = DELETED;
++	
++ out:
++	return err;
++}
++
++/* bring a dir from state UNMODIFIED to MODIFIED */
++int dir_unmod_to_mod(dentry_t *dentry) 
++{
++	int err;
++
++	check_mini_fo_dentry(dentry);
++
++	if(dtost(dentry) != UNMODIFIED ||
++	   !S_ISDIR(dentry->d_inode->i_mode)) {
++		printk(KERN_CRIT "mini_fo: dir_unmod_to_mod: \
++                                  wrong type or state.\n");
++		err = -1;
++		goto out;
++	}
++
++	/* this creates our dir incl. sto. structure */
++	err = build_sto_structure(dentry->d_parent, dentry);
++	if(err) {
++		printk(KERN_CRIT "mini_fo: dir_unmod_to_mod: \
++                                  build_sto_structure failed.\n");
++		goto out;
++	}
++ out:
++	return err;
++}
++
+diff -urN linux-2.6.21.1.old/fs/mini_fo/super.c linux-2.6.21.1.dev/fs/mini_fo/super.c
+--- linux-2.6.21.1.old/fs/mini_fo/super.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.dev/fs/mini_fo/super.c	2007-05-26 21:01:26.169328960 +0200
+@@ -0,0 +1,281 @@
++/*
++ * Copyright (c) 1997-2003 Erez Zadok
++ * Copyright (c) 2001-2003 Stony Brook University
++ *
++ * For specific licensing information, see the COPYING file distributed with
++ * this package, or get one from ftp://ftp.filesystems.org/pub/fist/COPYING.
++ *
++ * This Copyright notice must be kept intact and distributed with all
++ * fistgen sources INCLUDING sources generated by fistgen.
++ */
++/*
++ * Copyright (C) 2004, 2005 Markus Klotzbuecher <mk@creamnet.de>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++/*
++ *  $Id$
++ */
++
++#ifdef HAVE_CONFIG_H
++# include <config.h>
++#endif 
++
++#include "fist.h"
++#include "mini_fo.h"
++
++
++STATIC void
++mini_fo_read_inode(inode_t *inode)
++{
++	static struct address_space_operations mini_fo_empty_aops;
++
++	__itopd(inode) = kmalloc(sizeof(struct mini_fo_inode_info), GFP_KERNEL);
++	if (!itopd(inode)) {
++		printk("<0>%s:%s:%d: No kernel memory!\n", __FILE__, __FUNCTION__, __LINE__);
++		ASSERT(NULL);
++	}
++	itohi(inode) = NULL;
++	itohi2(inode) = NULL;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++	inode->i_version++;
++#else
++	inode->i_version = ++event;	/* increment inode version */
++#endif
++	inode->i_op = &mini_fo_main_iops;
++	inode->i_fop = &mini_fo_main_fops;
++#if 0
++	/*
++	 * XXX: To export a file system via NFS, it has to have the
++	 * FS_REQUIRES_DEV flag, so turn it on.  But should we inherit it from
++	 * the lower file system, or can we allow our file system to be exported
++	 * even if the lower one cannot be natively exported.
++	 */
++	inode->i_sb->s_type->fs_flags |= FS_REQUIRES_DEV;
++	/*
++	 * OK, the above was a hack, which is now turned off because it may
++	 * cause a panic/oops on some systems.  The correct way to export a
++	 * "nodev" filesystem is via using nfs-utils > 1.0 and the "fsid=" export
++	 * parameter, which requires 2.4.20 or later.
++	 */
++#endif
++	/* I don't think ->a_ops is ever allowed to be NULL */
++	inode->i_mapping->a_ops = &mini_fo_empty_aops;
++}
++
++
++#if defined(FIST_DEBUG) || defined(FIST_FILTER_SCA)
++/*
++ * No need to call write_inode() on the lower inode, as it
++ * will have been marked 'dirty' anyway. But we might need
++ * to write some of our own stuff to disk.
++ */
++STATIC void
++mini_fo_write_inode(inode_t *inode, int sync)
++{
++	print_entry_location();
++	print_exit_location();
++}
++#endif /* defined(FIST_DEBUG) || defined(FIST_FILTER_SCA) */
++
++
++STATIC void
++mini_fo_put_inode(inode_t *inode)
++{
++	/*
++	 * This is really funky stuff:
++	 * Basically, if i_count == 1, iput will then decrement it and this inode will be destroyed.
++	 * It is currently holding a reference to the hidden inode.
++	 * Therefore, it needs to release that reference by calling iput on the hidden inode.
++	 * iput() _will_ do it for us (by calling our clear_inode), but _only_ if i_nlink == 0.
++	 * The problem is, NFS keeps i_nlink == 1 for silly_rename'd files.
++	 * So we must force our i_nlink to 0 here to trick iput() into calling our clear_inode.
++	 */
++	if (atomic_read(&inode->i_count) == 1)
++		inode->i_nlink = 0;
++}
++
++
++#if defined(FIST_DEBUG) || defined(FIST_FILTER_SCA)
++/*
++ * we now define delete_inode, because there are two VFS paths that may
++ * destroy an inode: one of them calls clear_inode before doing everything
++ * else that's needed, and the other is fine.  This way we truncate the inode
++ * size (and its pages) and then clear our own inode, which will do an iput
++ * on our and the lower inode.
++ */
++STATIC void
++mini_fo_delete_inode(inode_t *inode)
++{
++	print_entry_location();
++
++	fist_checkinode(inode, "mini_fo_delete_inode IN");
++	inode->i_size = 0;		/* every f/s seems to do that */
++	clear_inode(inode);
++
++	print_exit_location();
++}
++#endif /* defined(FIST_DEBUG) || defined(FIST_FILTER_SCA) */
++
++
++/* final actions when unmounting a file system */
++STATIC void
++mini_fo_put_super(super_block_t *sb)
++{
++	if (stopd(sb)) {
++		mntput(stopd(sb)->hidden_mnt);
++		mntput(stopd(sb)->hidden_mnt2);
++
++		/* mk: no! dput(stopd(sb)->base_dir_dentry); 
++		   dput(stopd(sb)->storage_dir_dentry); */
++
++		kfree(stopd(sb));
++		__stopd(sb) = NULL;
++	}
++}
++
++
++#ifdef NOT_NEEDED
++/*
++ * This is called in do_umount before put_super.
++ * The superblock lock is not held yet.
++ * We probably do not need to define this or call write_super
++ * on the hidden_sb, because sync_supers() will get to hidden_sb
++ * sooner or later.  But it is also called from file_fsync()...
++ */
++STATIC void
++mini_fo_write_super(super_block_t *sb)
++{
++	return;
++}
++#endif /* NOT_NEEDED */
++
++
++STATIC int
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++mini_fo_statfs(struct dentry *d, struct kstatfs *buf)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++mini_fo_statfs(super_block_t *sb, struct kstatfs *buf)
++#else
++mini_fo_statfs(super_block_t *sb, struct statfs *buf)
++#endif
++{
++	int err = 0;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++	struct dentry *hidden_d;
++
++	hidden_d = dtohd(d);
++	err = vfs_statfs(hidden_d, buf);
++#else
++	super_block_t *hidden_sb;
++
++	hidden_sb = stohs(sb);
++	err = vfs_statfs(hidden_sb, buf);
++#endif
++
++	return err;
++}
++
++
++/*
++ * XXX: not implemented.  This is not allowed yet.
++ * Should we call this on the hidden_sb?  Probably not.
++ */
++STATIC int
++mini_fo_remount_fs(super_block_t *sb, int *flags, char *data)
++{
++	//printk(KERN_CRIT "mini_fo_remount_fs: WARNING, this function is unimplemented.\n");
++	return -ENOSYS;
++}
++
++
++/*
++ * Called by iput() when the inode reference count reached zero
++ * and the inode is not hashed anywhere.  Used to clear anything
++ * that needs to be, before the inode is completely destroyed and put
++ * on the inode free list.
++ */
++STATIC void
++mini_fo_clear_inode(inode_t *inode)
++{
++	/*
++	 * Decrement a reference to a hidden_inode, which was incremented
++	 * by our read_inode when it was created initially.
++	 */
++
++	/* release the wol_list */
++	if(S_ISDIR(inode->i_mode)) {
++		__meta_put_lists(inode);
++	}
++
++	/* mk: fan out fun */
++	if(itohi(inode))
++		iput(itohi(inode));
++	if(itohi2(inode))
++		iput(itohi2(inode));
++
++	// XXX: why this assertion fails?
++	// because it doesn't like us
++	// ASSERT((inode->i_state & I_DIRTY) == 0);
++	kfree(itopd(inode));
++	__itopd(inode) = NULL;
++}
++
++
++/*
++ * Called in do_umount() if the MNT_FORCE flag was used and this
++ * function is defined.  See comment in linux/fs/super.c:do_umount().
++ * Used only in nfs, to kill any pending RPC tasks, so that subsequent
++ * code can actually succeed and won't leave tasks that need handling.
++ *
++ * PS. I wonder if this is somehow useful to undo damage that was
++ * left in the kernel after a user level file server (such as amd)
++ * dies.
++ */
++STATIC void
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
++mini_fo_umount_begin(struct vfsmount *mnt, int flags)
++{
++	struct vfsmount *hidden_mnt;
++
++	hidden_mnt = stopd(mnt->mnt_sb)->hidden_mnt;
++
++	if (hidden_mnt->mnt_sb->s_op->umount_begin)
++		hidden_mnt->mnt_sb->s_op->umount_begin(hidden_mnt, flags);
++
++}
++#else
++mini_fo_umount_begin(super_block_t *sb)
++{
++	super_block_t *hidden_sb;
++
++	hidden_sb = stohs(sb);
++
++	if (hidden_sb->s_op->umount_begin)
++		hidden_sb->s_op->umount_begin(hidden_sb);
++
++}
++#endif
++
++
++struct super_operations mini_fo_sops =
++{
++	read_inode:		mini_fo_read_inode,
++#if defined(FIST_DEBUG) || defined(FIST_FILTER_SCA)
++	write_inode:	mini_fo_write_inode,
++#endif /* defined(FIST_DEBUG) || defined(FIST_FILTER_SCA) */
++	put_inode:		mini_fo_put_inode,
++#if defined(FIST_DEBUG) || defined(FIST_FILTER_SCA)
++	delete_inode:	mini_fo_delete_inode,
++#endif /* defined(FIST_DEBUG) || defined(FIST_FILTER_SCA) */
++	put_super:		mini_fo_put_super,
++	statfs:		mini_fo_statfs,
++	remount_fs:		mini_fo_remount_fs,
++	clear_inode:	mini_fo_clear_inode,
++	umount_begin:	mini_fo_umount_begin,
++};
diff --git a/target/linux/generic-2.6/patches-2.6.22/210-d80211_compat.patch b/target/linux/generic-2.6/patches-2.6.22/210-d80211_compat.patch
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/target/linux/generic-2.6/patches-2.6.22/211-no_block2mtd_readahead.patch b/target/linux/generic-2.6/patches-2.6.22/211-no_block2mtd_readahead.patch
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/target/linux/generic-2.6/patches-2.6.22/212-block2mtd_erase_scan.patch b/target/linux/generic-2.6/patches-2.6.22/212-block2mtd_erase_scan.patch
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/target/linux/generic-2.6/patches-2.6.22/510-Yaffs.patch b/target/linux/generic-2.6/patches-2.6.22/510-Yaffs.patch
new file mode 100644
index 0000000000..99775d750e
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/510-Yaffs.patch
@@ -0,0 +1,15021 @@
+diff -urN linux-2.6.21.1/fs/Kconfig linux-2.6.21.1.new/fs/Kconfig
+--- linux-2.6.21.1/fs/Kconfig	2007-06-08 14:07:09.000000000 +0200
++++ linux-2.6.21.1.new/fs/Kconfig	2007-06-08 14:09:26.000000000 +0200
+@@ -419,6 +419,7 @@
+ 
+ source "fs/xfs/Kconfig"
+ source "fs/gfs2/Kconfig"
++source "fs/yaffs2/Kconfig"
+ 
+ config OCFS2_FS
+ 	tristate "OCFS2 file system support"
+diff -urN linux-2.6.21.1/fs/Makefile linux-2.6.21.1.new/fs/Makefile
+--- linux-2.6.21.1/fs/Makefile	2007-06-08 14:07:09.000000000 +0200
++++ linux-2.6.21.1.new/fs/Makefile	2007-06-08 14:09:51.000000000 +0200
+@@ -116,3 +116,4 @@
+ obj-$(CONFIG_DEBUG_FS)		+= debugfs/
+ obj-$(CONFIG_OCFS2_FS)		+= ocfs2/
+ obj-$(CONFIG_GFS2_FS)           += gfs2/
++obj-$(CONFIG_YAFFS_FS)		+= yaffs2/
+diff -urN linux-2.6.21.1/fs/yaffs2/Kconfig linux-2.6.21.1.new/fs/yaffs2/Kconfig
+--- linux-2.6.21.1/fs/yaffs2/Kconfig	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/Kconfig	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,175 @@
++#
++# YAFFS file system configurations
++#
++
++config YAFFS_FS
++	tristate "YAFFS2 file system support"
++	default n
++	depends on MTD
++	select YAFFS_YAFFS1
++	select YAFFS_YAFFS2
++	help
++	  YAFFS2, or Yet Another Flash Filing System, is a filing system
++	  optimised for NAND Flash chips.
++
++	  To compile the YAFFS2 file system support as a module, choose M
++	  here: the module will be called yaffs2.
++
++	  If unsure, say N.
++
++	  Further information on YAFFS2 is available at
++	  <http://www.aleph1.co.uk/yaffs/>.
++
++config YAFFS_YAFFS1
++	bool "512 byte / page devices"
++	depends on YAFFS_FS
++	default y
++	help
++	  Enable YAFFS1 support -- yaffs for 512 byte / page devices
++	  
++	  Not needed for 2K-page devices.
++
++	  If unsure, say Y.
++
++config YAFFS_9BYTE_TAGS
++	bool "Use older-style on-NAND data format with pageStatus byte"
++	depends on YAFFS_YAFFS1
++	default n
++	help
++
++	  Older-style on-NAND data format has a "pageStatus" byte to record
++	  chunk/page state.  This byte is zero when the page is discarded.
++	  Choose this option if you have existing on-NAND data using this
++	  format that you need to continue to support.  New data written
++	  also uses the older-style format.  Note: Use of this option
++	  generally requires that MTD's oob layout be adjusted to use the
++	  older-style format.  See notes on tags formats and MTD versions.
++
++	  If unsure, say N.
++
++config YAFFS_DOES_ECC
++	bool "Lets Yaffs do its own ECC"
++	depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
++	default n
++	help
++	  This enables Yaffs to use its own ECC functions instead of using
++	  the ones from the generic MTD-NAND driver.
++
++	  If unsure, say N.
++
++config YAFFS_ECC_WRONG_ORDER
++	bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
++	depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
++	default n
++	help
++	  This makes yaffs_ecc.c use the same ecc byte order as Steven
++	  Hill's nand_ecc.c. If not set, then you get the same ecc byte
++	  order as SmartMedia.
++
++	  If unsure, say N.
++
++config YAFFS_YAFFS2
++	bool "2048 byte (or larger) / page devices"
++	depends on YAFFS_FS
++	default y
++	help
++	  Enable YAFFS2 support -- yaffs for >= 2K bytes per page devices
++
++	  If unsure, say Y.
++
++config YAFFS_AUTO_YAFFS2
++	bool "Autoselect yaffs2 format"
++	depends on YAFFS_YAFFS2
++	default y
++	help
++	  Without this, you need to explicitly use yaffs2 as the file
++	  system type. With this, you can say "yaffs" and yaffs or yaffs2
++	  will be used depending on the device page size (yaffs on
++	  512-byte page devices, yaffs2 on 2K page devices).
++
++	  If unsure, say Y.
++
++config YAFFS_DISABLE_LAZY_LOAD
++	bool "Disable lazy loading"
++	depends on YAFFS_YAFFS2
++	default n
++	help
++	  "Lazy loading" defers loading file details until they are
++	  required. This saves mount time, but makes the first look-up
++	  a bit longer.
++
++	  Lazy loading will only happen if enabled by this option being 'n'
++	  and if the appropriate tags are available, else yaffs2 will
++	  automatically fall back to immediate loading and do the right
++	  thing.
++
++	  Lazy loading will be required by checkpointing.
++
++	  Setting this to 'y' will disable lazy loading.
++
++	  If unsure, say N.
++
++config YAFFS_CHECKPOINT_RESERVED_BLOCKS
++	int "Reserved blocks for checkpointing"
++	depends on YAFFS_YAFFS2
++	default 10
++	help
++	  Give the number of blocks to reserve for checkpointing.
++	  Checkpointing saves the state at unmount so that mounting is
++	  much faster, as a scan of all the flash to regenerate this state
++	  is not needed.  These blocks are reserved per partition, so if
++	  you have very small partitions the default (10) may be too much
++	  for you.  You can set this value to 0, but that does not disable
++	  checkpointing entirely: there just won't be any specially
++	  reserved blocks for it, so if there is enough free space on the
++	  filesystem, that space will be used for checkpointing.
++
++	  If unsure, leave at the default (10), but don't be surprised if
++	  roughly 2MB are always in use on a large page device partition
++	  (10 blocks at 2k page size). When using small partitions, or when
++	  space is very tight, you probably want to set this to zero.
++
++config YAFFS_DISABLE_WIDE_TNODES
++	bool "Turn off wide tnodes"
++	depends on YAFFS_FS
++	default n
++	help
++	  Wide tnodes are only used for NAND arrays >=32MB for 512-byte
++	  page devices and >=128MB for 2k page devices. They use slightly
++	  more RAM but are faster since they eliminate chunk group
++	  searching.
++
++	  Setting this to 'y' will force tnode width to 16 bits and save
++	  memory but make large arrays slower.
++
++	  If unsure, say N.
++
++config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
++	bool "Force chunk erase check"
++	depends on YAFFS_FS
++	default n
++	help
++	  Normally YAFFS only checks chunks before writing until an erased
++	  chunk is found. This helps to detect any partially written
++	  chunks that might have resulted from power loss.
++
++	  Enabling this forces a check that chunks are erased in flash
++	  before writing to them. This takes more time but is potentially
++	  a bit more secure.
++
++	  We suggest setting this to Y during development to iron out
++	  driver issues etc., and to N if you want faster writing.
++
++	  If unsure, say Y.
++
++config YAFFS_SHORT_NAMES_IN_RAM
++	bool "Cache short names in RAM"
++	depends on YAFFS_FS
++	default y
++	help
++	  If this config is set, then short names are stored with the
++	  yaffs_Object.  This costs an extra 16 bytes of RAM per object,
++	  but makes look-ups faster.
++
++	  If unsure, say Y.
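A note on how the options above reach the C code: each Kconfig symbol becomes a CONFIG_YAFFS_* preprocessor macro (moduleconfig.h further down in this patch mirrors the same names for out-of-tree builds). A minimal sketch of consuming one of them follows; the struct and field names are illustrative assumptions, not the real YAFFS definitions.

/* Sketch only: how a Kconfig option such as YAFFS_SHORT_NAMES_IN_RAM is
 * typically consumed. The struct below is an illustrative assumption. */
#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
struct example_object {
	char shortName[16];	/* cached copy: costs RAM, speeds up look-ups */
};
#else
struct example_object {
	int placeholder;	/* name is fetched from flash on demand */
};
#endif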
+diff -urN linux-2.6.21.1/fs/yaffs2/Makefile linux-2.6.21.1.new/fs/yaffs2/Makefile
+--- linux-2.6.21.1/fs/yaffs2/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/Makefile	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,11 @@
++#
++# Makefile for the linux YAFFS filesystem routines.
++#
++
++obj-$(CONFIG_YAFFS_FS) += yaffs.o
++
++yaffs-y := yaffs_ecc.o yaffs_fs.o yaffs_guts.o yaffs_checkptrw.o
++yaffs-y += yaffs_packedtags2.o yaffs_nand.o yaffs_qsort.o
++yaffs-y += yaffs_tagscompat.o yaffs_tagsvalidity.o
++yaffs-y += yaffs_mtdif1.o yaffs_packedtags1.o
++yaffs-y += yaffs_mtdif.o yaffs_mtdif2.o
+diff -urN linux-2.6.21.1/fs/yaffs2/devextras.h linux-2.6.21.1.new/fs/yaffs2/devextras.h
+--- linux-2.6.21.1/fs/yaffs2/devextras.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/devextras.h	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,264 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/*
++ * This file just holds extra declarations used during development.
++ * Most of these are from kernel includes placed here so we can use them in 
++ * applications.
++ *
++ */
++
++#ifndef __EXTRAS_H__
++#define __EXTRAS_H__
++
++#if defined WIN32
++#define __inline__ __inline
++#define new newHack
++#endif
++
++#if !(defined __KERNEL__) || (defined WIN32)
++
++/* User space defines */
++
++typedef unsigned char __u8;
++typedef unsigned short __u16;
++typedef unsigned __u32;
++
++/*
++ * Simple doubly linked list implementation.
++ *
++ * Some of the internal functions ("__xxx") are useful when
++ * manipulating whole lists rather than single entries, as
++ * sometimes we already know the next/prev entries and we can
++ * generate better code by using them directly rather than
++ * using the generic single-entry routines.
++ */
++
++#define prefetch(x) 1
++
++struct list_head {
++	struct list_head *next, *prev;
++};
++
++#define LIST_HEAD_INIT(name) { &(name), &(name) }
++
++#define LIST_HEAD(name) \
++	struct list_head name = LIST_HEAD_INIT(name)
++
++#define INIT_LIST_HEAD(ptr) do { \
++	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
++} while (0)
++
++/*
++ * Insert a new entry between two known consecutive entries.
++ *
++ * This is only for internal list manipulation where we know
++ * the prev/next entries already!
++ */
++static __inline__ void __list_add(struct list_head *new,
++				  struct list_head *prev,
++				  struct list_head *next)
++{
++	next->prev = new;
++	new->next = next;
++	new->prev = prev;
++	prev->next = new;
++}
++
++/**
++ * list_add - add a new entry
++ * @new: new entry to be added
++ * @head: list head to add it after
++ *
++ * Insert a new entry after the specified head.
++ * This is good for implementing stacks.
++ */
++static __inline__ void list_add(struct list_head *new, struct list_head *head)
++{
++	__list_add(new, head, head->next);
++}
++
++/**
++ * list_add_tail - add a new entry
++ * @new: new entry to be added
++ * @head: list head to add it before
++ *
++ * Insert a new entry before the specified head.
++ * This is useful for implementing queues.
++ */
++static __inline__ void list_add_tail(struct list_head *new,
++				     struct list_head *head)
++{
++	__list_add(new, head->prev, head);
++}
++
++/*
++ * Delete a list entry by making the prev/next entries
++ * point to each other.
++ *
++ * This is only for internal list manipulation where we know
++ * the prev/next entries already!
++ */
++static __inline__ void __list_del(struct list_head *prev,
++				  struct list_head *next)
++{
++	next->prev = prev;
++	prev->next = next;
++}
++
++/**
++ * list_del - deletes entry from list.
++ * @entry: the element to delete from the list.
++ * Note: list_empty on entry does not return true after this, the entry is
++ * in an undefined state.
++ */
++static __inline__ void list_del(struct list_head *entry)
++{
++	__list_del(entry->prev, entry->next);
++}
++
++/**
++ * list_del_init - deletes entry from list and reinitialize it.
++ * @entry: the element to delete from the list.
++ */
++static __inline__ void list_del_init(struct list_head *entry)
++{
++	__list_del(entry->prev, entry->next);
++	INIT_LIST_HEAD(entry);
++}
++
++/**
++ * list_empty - tests whether a list is empty
++ * @head: the list to test.
++ */
++static __inline__ int list_empty(struct list_head *head)
++{
++	return head->next == head;
++}
++
++/**
++ * list_splice - join two lists
++ * @list: the new list to add.
++ * @head: the place to add it in the first list.
++ */
++static __inline__ void list_splice(struct list_head *list,
++				   struct list_head *head)
++{
++	struct list_head *first = list->next;
++
++	if (first != list) {
++		struct list_head *last = list->prev;
++		struct list_head *at = head->next;
++
++		first->prev = head;
++		head->next = first;
++
++		last->next = at;
++		at->prev = last;
++	}
++}
++
++/**
++ * list_entry - get the struct for this entry
++ * @ptr:	the &struct list_head pointer.
++ * @type:	the type of the struct this is embedded in.
++ * @member:	the name of the list_struct within the struct.
++ */
++#define list_entry(ptr, type, member) \
++	((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
++
++/**
++ * list_for_each	-	iterate over a list
++ * @pos:	the &struct list_head to use as a loop counter.
++ * @head:	the head for your list.
++ */
++#define list_for_each(pos, head) \
++	for (pos = (head)->next, prefetch(pos->next); pos != (head); \
++        	pos = pos->next, prefetch(pos->next))
++
++/**
++ * list_for_each_safe	-	iterate over a list safe against removal
++ *                              of list entry
++ * @pos:	the &struct list_head to use as a loop counter.
++ * @n:		another &struct list_head to use as temporary storage
++ * @head:	the head for your list.
++ */
++#define list_for_each_safe(pos, n, head) \
++	for (pos = (head)->next, n = pos->next; pos != (head); \
++		pos = n, n = pos->next)
++
++/*
++ * File types
++ */
++#define DT_UNKNOWN	0
++#define DT_FIFO		1
++#define DT_CHR		2
++#define DT_DIR		4
++#define DT_BLK		6
++#define DT_REG		8
++#define DT_LNK		10
++#define DT_SOCK		12
++#define DT_WHT		14
++
++#ifndef WIN32
++#include <sys/stat.h>
++#endif
++
++/*
++ * Attribute flags.  These should be or-ed together to figure out what
++ * has been changed!
++ */
++#define ATTR_MODE	1
++#define ATTR_UID	2
++#define ATTR_GID	4
++#define ATTR_SIZE	8
++#define ATTR_ATIME	16
++#define ATTR_MTIME	32
++#define ATTR_CTIME	64
++#define ATTR_ATIME_SET	128
++#define ATTR_MTIME_SET	256
++#define ATTR_FORCE	512	/* Not a change, but a change it */
++#define ATTR_ATTR_FLAG	1024
++
++struct iattr {
++	unsigned int ia_valid;
++	unsigned ia_mode;
++	unsigned ia_uid;
++	unsigned ia_gid;
++	unsigned ia_size;
++	unsigned ia_atime;
++	unsigned ia_mtime;
++	unsigned ia_ctime;
++	unsigned int ia_attr_flags;
++};
++
++#define KERN_DEBUG
++
++#else
++
++#ifndef WIN32
++#include <linux/types.h>
++#include <linux/list.h>
++#include <linux/fs.h>
++#include <linux/stat.h>
++#endif
++
++#endif
++
++#if defined WIN32
++#undef new
++#endif
++
++#endif
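The user-space list helpers above mirror the kernel's <linux/list.h> API. A minimal usage sketch, assuming devextras.h is on the include path of a plain user-space build:

#include <stdio.h>
#include "devextras.h"

struct item {
	int value;
	struct list_head link;	/* list node embedded in the containing struct */
};

int main(void)
{
	struct item a = { 1 }, b = { 2 };
	struct list_head *pos;
	LIST_HEAD(items);

	list_add_tail(&a.link, &items);	/* queue-style insertion at the tail */
	list_add_tail(&b.link, &items);

	list_for_each(pos, &items) {
		/* list_entry() recovers the containing struct from the node */
		struct item *it = list_entry(pos, struct item, link);
		printf("value = %d\n", it->value);
	}
	return 0;
}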
+diff -urN linux-2.6.21.1/fs/yaffs2/moduleconfig.h linux-2.6.21.1.new/fs/yaffs2/moduleconfig.h
+--- linux-2.6.21.1/fs/yaffs2/moduleconfig.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/moduleconfig.h	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,65 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Martin Fouts <Martin.Fouts@palmsource.com> 
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_CONFIG_H__
++#define __YAFFS_CONFIG_H__
++
++#ifdef YAFFS_OUT_OF_TREE
++
++/* DO NOT UNSET THESE THREE. YAFFS2 will not compile if you do. */
++#define CONFIG_YAFFS_FS
++#define CONFIG_YAFFS_YAFFS1
++#define CONFIG_YAFFS_YAFFS2
++
++/* These options are independent of each other.  Select those that matter. */
++
++/* Default: Not selected */
++/* Meaning: Yaffs does its own ECC, rather than using MTD ECC */
++//#define CONFIG_YAFFS_DOES_ECC
++
++/* Default: Not selected */
++/* Meaning: ECC byte order is 'wrong'.  Only meaningful if */
++/*          CONFIG_YAFFS_DOES_ECC is set */
++//#define CONFIG_YAFFS_ECC_WRONG_ORDER
++
++/* Default: Selected */
++/* Meaning: Disables testing whether chunks are erased before writing to them*/
++#define CONFIG_YAFFS_DISABLE_CHUNK_ERASED_CHECK
++
++/* Default: Selected */
++/* Meaning: Cache short names, taking more RAM, but faster look-ups */
++#define CONFIG_YAFFS_SHORT_NAMES_IN_RAM
++
++/* Default: 10 */
++/* Meaning: set the count of blocks to reserve for checkpointing */
++#define CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS 10
++
++/*
++Older-style on-NAND data format has a "pageStatus" byte to record
++chunk/page state.  This byte is zeroed when the page is discarded.
++Choose this option if you have existing on-NAND data in this format
++that you need to continue to support.  New data written also uses the
++older-style format.
++Note: Use of this option generally requires that MTD's oob layout be
++adjusted to use the older-style format.  See notes on tags formats and
++MTD versions.
++*/
++/* Default: Not selected */
++/* Meaning: Use older-style on-NAND data format with pageStatus byte */
++#define CONFIG_YAFFS_9BYTE_TAGS
++
++#endif /* YAFFS_OUT_OF_TREE */
++
++#endif /* __YAFFS_CONFIG_H__ */
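For builds outside the kernel tree there is no Kconfig, so the header above supplies the defaults itself once YAFFS_OUT_OF_TREE is defined. A rough sketch of that arrangement; the compilation unit shown is hypothetical:

/* Hypothetical out-of-tree compilation unit: defining YAFFS_OUT_OF_TREE before
 * including moduleconfig.h (e.g. via -DYAFFS_OUT_OF_TREE on the compiler
 * command line) turns on the CONFIG_YAFFS_* defaults listed above. */
#define YAFFS_OUT_OF_TREE
#include "moduleconfig.h"

#ifndef CONFIG_YAFFS_FS
#error "moduleconfig.h did not provide the out-of-tree defaults"
#endif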
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_checkptrw.c linux-2.6.21.1.new/fs/yaffs2/yaffs_checkptrw.c
+--- linux-2.6.21.1/fs/yaffs2/yaffs_checkptrw.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_checkptrw.c	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,404 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++const char *yaffs_checkptrw_c_version =
++    "$Id: yaffs_checkptrw.c,v 1.14 2007-05-15 20:07:40 charles Exp $";
++
++
++#include "yaffs_checkptrw.h"
++
++
++static int yaffs_CheckpointSpaceOk(yaffs_Device *dev)
++{
++
++	int blocksAvailable = dev->nErasedBlocks - dev->nReservedBlocks;
++	
++	T(YAFFS_TRACE_CHECKPOINT,
++		(TSTR("checkpt blocks available = %d" TENDSTR),
++		blocksAvailable));
++		
++	
++	return (blocksAvailable <= 0) ? 0 : 1;
++}
++
++
++static int yaffs_CheckpointErase(yaffs_Device *dev)
++{
++	
++	int i;
++	
++
++	if(!dev->eraseBlockInNAND)	
++		return 0;
++	T(YAFFS_TRACE_CHECKPOINT,(TSTR("checking blocks %d to %d"TENDSTR),
++		dev->internalStartBlock,dev->internalEndBlock));
++		
++	for(i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
++		yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,i);
++		if(bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT){
++			T(YAFFS_TRACE_CHECKPOINT,(TSTR("erasing checkpt block %d"TENDSTR),i));
++			if(dev->eraseBlockInNAND(dev,i- dev->blockOffset /* realign */)){
++				bi->blockState = YAFFS_BLOCK_STATE_EMPTY;
++				dev->nErasedBlocks++;
++				dev->nFreeChunks += dev->nChunksPerBlock;
++			}
++			else {
++				dev->markNANDBlockBad(dev,i);
++				bi->blockState = YAFFS_BLOCK_STATE_DEAD;
++			}
++		}
++	}
++	
++	dev->blocksInCheckpoint = 0;
++	
++	return 1;
++}
++
++
++static void yaffs_CheckpointFindNextErasedBlock(yaffs_Device *dev)
++{
++	int  i;
++	int blocksAvailable = dev->nErasedBlocks - dev->nReservedBlocks;
++	T(YAFFS_TRACE_CHECKPOINT,
++		(TSTR("allocating checkpt block: erased %d reserved %d avail %d next %d "TENDSTR),
++		dev->nErasedBlocks,dev->nReservedBlocks,blocksAvailable,dev->checkpointNextBlock));
++		
++	if(dev->checkpointNextBlock >= 0 &&
++	   dev->checkpointNextBlock <= dev->internalEndBlock &&
++	   blocksAvailable > 0){
++	
++		for(i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++){
++			yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,i);
++			if(bi->blockState == YAFFS_BLOCK_STATE_EMPTY){
++				dev->checkpointNextBlock = i + 1;
++				dev->checkpointCurrentBlock = i;
++				T(YAFFS_TRACE_CHECKPOINT,(TSTR("allocating checkpt block %d"TENDSTR),i));
++				return;
++			}
++		}
++	}
++	T(YAFFS_TRACE_CHECKPOINT,(TSTR("out of checkpt blocks"TENDSTR)));
++	
++	dev->checkpointNextBlock = -1;
++	dev->checkpointCurrentBlock = -1;
++}
++
++static void yaffs_CheckpointFindNextCheckpointBlock(yaffs_Device *dev)
++{
++	int  i;
++	yaffs_ExtendedTags tags;
++	
++	T(YAFFS_TRACE_CHECKPOINT,(TSTR("find next checkpt block: start:  blocks %d next %d" TENDSTR),
++		dev->blocksInCheckpoint, dev->checkpointNextBlock));
++		
++	if(dev->blocksInCheckpoint < dev->checkpointMaxBlocks) 
++		for(i = dev->checkpointNextBlock; i <= dev->internalEndBlock; i++){
++			int chunk = i * dev->nChunksPerBlock;
++			int realignedChunk = chunk - dev->chunkOffset;
++
++			dev->readChunkWithTagsFromNAND(dev,realignedChunk,NULL,&tags);
++			T(YAFFS_TRACE_CHECKPOINT,(TSTR("find next checkpt block: search: block %d oid %d seq %d eccr %d" TENDSTR), 
++				i, tags.objectId,tags.sequenceNumber,tags.eccResult));
++						      
++			if(tags.sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA){
++				/* Right kind of block */
++				dev->checkpointNextBlock = tags.objectId;
++				dev->checkpointCurrentBlock = i;
++				dev->checkpointBlockList[dev->blocksInCheckpoint] = i;
++				dev->blocksInCheckpoint++;
++				T(YAFFS_TRACE_CHECKPOINT,(TSTR("found checkpt block %d"TENDSTR),i));
++				return;
++			}
++		}
++
++	T(YAFFS_TRACE_CHECKPOINT,(TSTR("found no more checkpt blocks"TENDSTR)));
++
++	dev->checkpointNextBlock = -1;
++	dev->checkpointCurrentBlock = -1;
++}
++
++
++int yaffs_CheckpointOpen(yaffs_Device *dev, int forWriting)
++{
++	
++	/* Got the functions we need? */
++	if (!dev->writeChunkWithTagsToNAND ||
++	    !dev->readChunkWithTagsFromNAND ||
++	    !dev->eraseBlockInNAND ||
++	    !dev->markNANDBlockBad)
++		return 0;
++
++	if(forWriting && !yaffs_CheckpointSpaceOk(dev))
++		return 0;
++			
++	if(!dev->checkpointBuffer)
++		dev->checkpointBuffer = YMALLOC_DMA(dev->nDataBytesPerChunk);
++	if(!dev->checkpointBuffer)
++		return 0;
++
++	
++	dev->checkpointPageSequence = 0;
++	
++	dev->checkpointOpenForWrite = forWriting;
++	
++	dev->checkpointByteCount = 0;
++	dev->checkpointSum = 0;
++	dev->checkpointXor = 0;
++	dev->checkpointCurrentBlock = -1;
++	dev->checkpointCurrentChunk = -1;
++	dev->checkpointNextBlock = dev->internalStartBlock;
++	
++	/* Erase all the blocks in the checkpoint area */
++	if(forWriting){
++		memset(dev->checkpointBuffer,0,dev->nDataBytesPerChunk);
++		dev->checkpointByteOffset = 0;
++		return yaffs_CheckpointErase(dev);
++		
++		
++	} else {
++		int i;
++		/* Set to a value that will kick off a read */
++		dev->checkpointByteOffset = dev->nDataBytesPerChunk;
++		/* A checkpoint block list of 1 checkpoint block per 16 blocks is (hopefully)
++		 * going to be way more than we need */
++		dev->blocksInCheckpoint = 0;
++		dev->checkpointMaxBlocks = (dev->internalEndBlock - dev->internalStartBlock)/16 + 2;
++		dev->checkpointBlockList = YMALLOC(sizeof(int) * dev->checkpointMaxBlocks);
++		for(i = 0; i < dev->checkpointMaxBlocks; i++)
++			dev->checkpointBlockList[i] = -1;
++	}
++	
++	return 1;
++}
++
++int yaffs_GetCheckpointSum(yaffs_Device *dev, __u32 *sum)
++{
++	__u32 compositeSum;
++	compositeSum =  (dev->checkpointSum << 8) | (dev->checkpointXor & 0xFF);
++	*sum = compositeSum;
++	return 1;
++}
++
++static int yaffs_CheckpointFlushBuffer(yaffs_Device *dev)
++{
++
++	int chunk;
++	int realignedChunk;
++
++	yaffs_ExtendedTags tags;
++	
++	if(dev->checkpointCurrentBlock < 0){
++		yaffs_CheckpointFindNextErasedBlock(dev);
++		dev->checkpointCurrentChunk = 0;
++	}
++	
++	if(dev->checkpointCurrentBlock < 0)
++		return 0;
++	
++	tags.chunkDeleted = 0;
++	tags.objectId = dev->checkpointNextBlock; /* Hint to next place to look */
++	tags.chunkId = dev->checkpointPageSequence + 1;
++	tags.sequenceNumber =  YAFFS_SEQUENCE_CHECKPOINT_DATA;
++	tags.byteCount = dev->nDataBytesPerChunk;
++	if(dev->checkpointCurrentChunk == 0){
++		/* First chunk we write for the block? Set block state to
++		   checkpoint */
++		yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,dev->checkpointCurrentBlock);
++		bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT;
++		dev->blocksInCheckpoint++;
++	}
++	
++	chunk = dev->checkpointCurrentBlock * dev->nChunksPerBlock + dev->checkpointCurrentChunk;
++
++	
++	T(YAFFS_TRACE_CHECKPOINT,(TSTR("checkpoint write buffer nand %d(%d:%d) objid %d chId %d" TENDSTR),
++		chunk, dev->checkpointCurrentBlock, dev->checkpointCurrentChunk,tags.objectId,tags.chunkId)); 
++	
++	realignedChunk = chunk - dev->chunkOffset;
++	
++	dev->writeChunkWithTagsToNAND(dev,realignedChunk,dev->checkpointBuffer,&tags);
++	dev->checkpointByteOffset = 0;
++	dev->checkpointPageSequence++;	   
++	dev->checkpointCurrentChunk++;
++	if(dev->checkpointCurrentChunk >= dev->nChunksPerBlock){
++		dev->checkpointCurrentChunk = 0;
++		dev->checkpointCurrentBlock = -1;
++	}
++	memset(dev->checkpointBuffer,0,dev->nDataBytesPerChunk);
++	
++	return 1;
++}
++
++
++int yaffs_CheckpointWrite(yaffs_Device *dev,const void *data, int nBytes)
++{
++	int i=0;
++	int ok = 1;
++
++	
++	__u8 * dataBytes = (__u8 *)data;
++	
++	
++
++	if(!dev->checkpointBuffer)
++		return 0;
++		
++	if(!dev->checkpointOpenForWrite)
++		return -1;
++
++	while(i < nBytes && ok) {
++		
++
++		
++		dev->checkpointBuffer[dev->checkpointByteOffset] = *dataBytes ;
++		dev->checkpointSum += *dataBytes;
++		dev->checkpointXor ^= *dataBytes;
++		 
++		dev->checkpointByteOffset++;
++		i++;
++		dataBytes++;
++		dev->checkpointByteCount++;
++		
++		
++		if(dev->checkpointByteOffset < 0 ||
++		   dev->checkpointByteOffset >= dev->nDataBytesPerChunk) 
++			ok = yaffs_CheckpointFlushBuffer(dev);
++
++	}
++	
++	return 	i;
++}
++
++int yaffs_CheckpointRead(yaffs_Device *dev, void *data, int nBytes)
++{
++	int i=0;
++	int ok = 1;
++	yaffs_ExtendedTags tags;
++
++	
++	int chunk;
++	int realignedChunk;
++
++	__u8 *dataBytes = (__u8 *)data;
++		
++	if(!dev->checkpointBuffer)
++		return 0;
++
++	if(dev->checkpointOpenForWrite)
++		return -1;
++
++	while(i < nBytes && ok) {
++	
++	
++		if(dev->checkpointByteOffset < 0 ||
++		   dev->checkpointByteOffset >= dev->nDataBytesPerChunk) {
++		   
++		   	if(dev->checkpointCurrentBlock < 0){
++				yaffs_CheckpointFindNextCheckpointBlock(dev);
++				dev->checkpointCurrentChunk = 0;
++			}
++			
++			if(dev->checkpointCurrentBlock < 0)
++				ok = 0;
++			else {
++			
++				chunk = dev->checkpointCurrentBlock * dev->nChunksPerBlock + 
++				          dev->checkpointCurrentChunk;
++
++				realignedChunk = chunk - dev->chunkOffset;
++
++	   			/* read in the next chunk */
++	   			/* printf("read checkpoint page %d\n",dev->checkpointPage); */
++				dev->readChunkWithTagsFromNAND(dev, realignedChunk, 
++							       dev->checkpointBuffer,
++							      &tags);
++						      
++				if(tags.chunkId != (dev->checkpointPageSequence + 1) ||
++				   tags.sequenceNumber != YAFFS_SEQUENCE_CHECKPOINT_DATA)
++				   ok = 0;
++
++				dev->checkpointByteOffset = 0;
++				dev->checkpointPageSequence++;
++				dev->checkpointCurrentChunk++;
++			
++				if(dev->checkpointCurrentChunk >= dev->nChunksPerBlock)
++					dev->checkpointCurrentBlock = -1;
++			}
++		}
++		
++		if(ok){
++			*dataBytes = dev->checkpointBuffer[dev->checkpointByteOffset];
++			dev->checkpointSum += *dataBytes;
++			dev->checkpointXor ^= *dataBytes;
++			dev->checkpointByteOffset++;
++			i++;
++			dataBytes++;
++			dev->checkpointByteCount++;
++		}
++	}
++	
++	return 	i;
++}
++
++int yaffs_CheckpointClose(yaffs_Device *dev)
++{
++
++	if(dev->checkpointOpenForWrite){	
++		if(dev->checkpointByteOffset != 0)
++			yaffs_CheckpointFlushBuffer(dev);
++	} else {
++		int i;
++		for(i = 0; i < dev->blocksInCheckpoint && dev->checkpointBlockList[i] >= 0; i++){
++			yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,dev->checkpointBlockList[i]);
++			if(bi->blockState == YAFFS_BLOCK_STATE_EMPTY)
++				bi->blockState = YAFFS_BLOCK_STATE_CHECKPOINT;
++			else {
++				// Todo this looks odd...
++			}
++		}
++		YFREE(dev->checkpointBlockList);
++		dev->checkpointBlockList = NULL;
++	}
++
++	dev->nFreeChunks -= dev->blocksInCheckpoint * dev->nChunksPerBlock;
++	dev->nErasedBlocks -= dev->blocksInCheckpoint;
++
++		
++	T(YAFFS_TRACE_CHECKPOINT,(TSTR("checkpoint byte count %d" TENDSTR),
++			dev->checkpointByteCount));
++			
++	if(dev->checkpointBuffer){
++		/* free the buffer */	
++		YFREE(dev->checkpointBuffer);
++		dev->checkpointBuffer = NULL;
++		return 1;
++	}
++	else
++		return 0;
++	
++}
++
++int yaffs_CheckpointInvalidateStream(yaffs_Device *dev)
++{
++	/* Erase the checkpoint blocks */
++
++	T(YAFFS_TRACE_CHECKPOINT,(TSTR("checkpoint invalidate"TENDSTR)));
++
++	if(!yaffs_CheckpointSpaceOk(dev))
++		return 0;
++
++	return yaffs_CheckpointErase(dev);
++}
++
++
++
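The read and write paths above both fold every streamed byte into a running sum and a running XOR, and yaffs_GetCheckpointSum() packs the pair into a single word. A small stand-alone sketch of that composite checksum; the helper name is made up for illustration:

#include <stdio.h>

/* Mirrors the per-byte accumulation in yaffs_CheckpointWrite()/Read() and the
 * packing done by yaffs_GetCheckpointSum(). */
static unsigned checkpoint_composite_sum(const unsigned char *data, int nBytes)
{
	unsigned sum = 0;
	unsigned xorAcc = 0;
	int i;

	for (i = 0; i < nBytes; i++) {
		sum += data[i];
		xorAcc ^= data[i];
	}
	return (sum << 8) | (xorAcc & 0xFF);
}

int main(void)
{
	unsigned char blob[] = { 0x10, 0x20, 0x30 };

	printf("composite checksum: 0x%x\n",
	       checkpoint_composite_sum(blob, sizeof(blob)));
	return 0;
}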
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_checkptrw.h linux-2.6.21.1.new/fs/yaffs2/yaffs_checkptrw.h
+--- linux-2.6.21.1/fs/yaffs2/yaffs_checkptrw.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_checkptrw.h	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,35 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_CHECKPTRW_H__
++#define __YAFFS_CHECKPTRW_H__
++
++#include "yaffs_guts.h"
++
++int yaffs_CheckpointOpen(yaffs_Device *dev, int forWriting);
++
++int yaffs_CheckpointWrite(yaffs_Device *dev,const void *data, int nBytes);
++
++int yaffs_CheckpointRead(yaffs_Device *dev,void *data, int nBytes);
++
++int yaffs_GetCheckpointSum(yaffs_Device *dev, __u32 *sum);
++
++int yaffs_CheckpointClose(yaffs_Device *dev);
++
++int yaffs_CheckpointInvalidateStream(yaffs_Device *dev);
++
++
++#endif
++
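A rough usage sketch of the stream interface declared above: open for writing at unmount, stream the state out, close; open for reading at mount and pull the same bytes back. The wrapper functions and the opaque blob are assumptions for illustration only.

#include "yaffs_checkptrw.h"

/* Illustrative wrapper: save an opaque blob through the checkpoint stream. */
static int example_checkpoint_save(yaffs_Device *dev, const void *blob, int nBytes)
{
	int ok;

	if (!yaffs_CheckpointOpen(dev, 1))		/* 1 == open for writing */
		return 0;
	ok = (yaffs_CheckpointWrite(dev, blob, nBytes) == nBytes);
	ok = yaffs_CheckpointClose(dev) && ok;		/* flushes the last partial chunk */
	return ok;
}

/* Illustrative wrapper: read the blob back at mount time. */
static int example_checkpoint_restore(yaffs_Device *dev, void *blob, int nBytes)
{
	int ok;

	if (!yaffs_CheckpointOpen(dev, 0))		/* 0 == open for reading */
		return 0;
	ok = (yaffs_CheckpointRead(dev, blob, nBytes) == nBytes);
	ok = yaffs_CheckpointClose(dev) && ok;
	return ok;
}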
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_ecc.c linux-2.6.21.1.new/fs/yaffs2/yaffs_ecc.c
+--- linux-2.6.21.1/fs/yaffs2/yaffs_ecc.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_ecc.c	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,331 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * This code implements the ECC algorithm used in SmartMedia.
++ *
++ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes. 
++ * The two unused bits are set to 1.
++ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC 
++ * blocks are used on a 512-byte NAND page.
++ *
++ */
++
++/* Table generated by gen-ecc.c
++ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
++ * for each byte of data. These are instead provided in a table in bits 7..2.
++ * Bit 0 of each entry indicates whether the entry has odd or even parity, and
++ * therefore this byte's influence on the line parity.
++ */
++
++const char *yaffs_ecc_c_version =
++    "$Id: yaffs_ecc.c,v 1.9 2007-02-14 01:09:06 wookey Exp $";
++
++#include "yportenv.h"
++
++#include "yaffs_ecc.h"
++
++static const unsigned char column_parity_table[] = {
++	0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
++	0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
++	0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
++	0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
++	0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
++	0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
++	0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
++	0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
++	0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
++	0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
++	0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
++	0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
++	0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
++	0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
++	0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
++	0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
++	0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
++	0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
++	0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
++	0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
++	0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
++	0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
++	0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
++	0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
++	0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
++	0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
++	0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
++	0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
++	0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
++	0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
++	0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
++	0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
++};
++
++/* Count the bits in an unsigned char or a U32 */
++
++static int yaffs_CountBits(unsigned char x)
++{
++	int r = 0;
++	while (x) {
++		if (x & 1)
++			r++;
++		x >>= 1;
++	}
++	return r;
++}
++
++static int yaffs_CountBits32(unsigned x)
++{
++	int r = 0;
++	while (x) {
++		if (x & 1)
++			r++;
++		x >>= 1;
++	}
++	return r;
++}
++
++/* Calculate the ECC for a 256-byte block of data */
++void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc)
++{
++	unsigned int i;
++
++	unsigned char col_parity = 0;
++	unsigned char line_parity = 0;
++	unsigned char line_parity_prime = 0;
++	unsigned char t;
++	unsigned char b;
++
++	for (i = 0; i < 256; i++) {
++		b = column_parity_table[*data++];
++		col_parity ^= b;
++
++		if (b & 0x01)	// odd number of bits in the byte
++		{
++			line_parity ^= i;
++			line_parity_prime ^= ~i;
++		}
++
++	}
++
++	ecc[2] = (~col_parity) | 0x03;
++
++	t = 0;
++	if (line_parity & 0x80)
++		t |= 0x80;
++	if (line_parity_prime & 0x80)
++		t |= 0x40;
++	if (line_parity & 0x40)
++		t |= 0x20;
++	if (line_parity_prime & 0x40)
++		t |= 0x10;
++	if (line_parity & 0x20)
++		t |= 0x08;
++	if (line_parity_prime & 0x20)
++		t |= 0x04;
++	if (line_parity & 0x10)
++		t |= 0x02;
++	if (line_parity_prime & 0x10)
++		t |= 0x01;
++	ecc[1] = ~t;
++
++	t = 0;
++	if (line_parity & 0x08)
++		t |= 0x80;
++	if (line_parity_prime & 0x08)
++		t |= 0x40;
++	if (line_parity & 0x04)
++		t |= 0x20;
++	if (line_parity_prime & 0x04)
++		t |= 0x10;
++	if (line_parity & 0x02)
++		t |= 0x08;
++	if (line_parity_prime & 0x02)
++		t |= 0x04;
++	if (line_parity & 0x01)
++		t |= 0x02;
++	if (line_parity_prime & 0x01)
++		t |= 0x01;
++	ecc[0] = ~t;
++
++#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
++	// Swap the bytes into the wrong order
++	t = ecc[0];
++	ecc[0] = ecc[1];
++	ecc[1] = t;
++#endif
++}
++
++
++/* Correct the ECC on a 256 byte block of data */
++
++int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc,
++		     const unsigned char *test_ecc)
++{
++	unsigned char d0, d1, d2;	/* deltas */
++
++	d0 = read_ecc[0] ^ test_ecc[0];
++	d1 = read_ecc[1] ^ test_ecc[1];
++	d2 = read_ecc[2] ^ test_ecc[2];
++
++	if ((d0 | d1 | d2) == 0)
++		return 0; /* no error */
++
++	if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
++	    ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
++	    ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
++		/* Single bit (recoverable) error in data */
++
++		unsigned byte;
++		unsigned bit;
++
++#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
++		// swap the bytes to correct for the wrong order
++		unsigned char t;
++
++		t = d0;
++		d0 = d1;
++		d1 = t;
++#endif
++
++		bit = byte = 0;
++
++		if (d1 & 0x80)
++			byte |= 0x80;
++		if (d1 & 0x20)
++			byte |= 0x40;
++		if (d1 & 0x08)
++			byte |= 0x20;
++		if (d1 & 0x02)
++			byte |= 0x10;
++		if (d0 & 0x80)
++			byte |= 0x08;
++		if (d0 & 0x20)
++			byte |= 0x04;
++		if (d0 & 0x08)
++			byte |= 0x02;
++		if (d0 & 0x02)
++			byte |= 0x01;
++
++		if (d2 & 0x80)
++			bit |= 0x04;
++		if (d2 & 0x20)
++			bit |= 0x02;
++		if (d2 & 0x08)
++			bit |= 0x01;
++
++		data[byte] ^= (1 << bit);
++
++		return 1; /* Corrected the error */
++	}
++
++	if ((yaffs_CountBits(d0) + 
++	     yaffs_CountBits(d1) + 
++	     yaffs_CountBits(d2)) ==  1) {
++		/* Recoverable error in ECC */
++
++		read_ecc[0] = test_ecc[0];
++		read_ecc[1] = test_ecc[1];
++		read_ecc[2] = test_ecc[2];
++
++		return 1; /* Corrected the error */
++	}
++	
++	/* Unrecoverable error */
++
++	return -1;
++
++}
++
++
++/*
++ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
++ */
++void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
++			     yaffs_ECCOther * eccOther)
++{
++	unsigned int i;
++
++	unsigned char col_parity = 0;
++	unsigned line_parity = 0;
++	unsigned line_parity_prime = 0;
++	unsigned char b;
++
++	for (i = 0; i < nBytes; i++) {
++		b = column_parity_table[*data++];
++		col_parity ^= b;
++
++		if (b & 0x01)	 {
++			/* odd number of bits in the byte */
++			line_parity ^= i;
++			line_parity_prime ^= ~i;
++		}
++
++	}
++
++	eccOther->colParity = (col_parity >> 2) & 0x3f;
++	eccOther->lineParity = line_parity;
++	eccOther->lineParityPrime = line_parity_prime;
++}
++
++int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
++			  yaffs_ECCOther * read_ecc,
++			  const yaffs_ECCOther * test_ecc)
++{
++	unsigned char cDelta;	/* column parity delta */
++	unsigned lDelta;	/* line parity delta */
++	unsigned lDeltaPrime;	/* line parity delta */
++	unsigned bit;
++
++	cDelta = read_ecc->colParity ^ test_ecc->colParity;
++	lDelta = read_ecc->lineParity ^ test_ecc->lineParity;
++	lDeltaPrime = read_ecc->lineParityPrime ^ test_ecc->lineParityPrime;
++
++	if ((cDelta | lDelta | lDeltaPrime) == 0)
++		return 0; /* no error */
++
++	if (lDelta == ~lDeltaPrime && 
++	    (((cDelta ^ (cDelta >> 1)) & 0x15) == 0x15))
++	{
++		/* Single bit (recoverable) error in data */
++
++		bit = 0;
++
++		if (cDelta & 0x20)
++			bit |= 0x04;
++		if (cDelta & 0x08)
++			bit |= 0x02;
++		if (cDelta & 0x02)
++			bit |= 0x01;
++
++		if(lDelta >= nBytes)
++			return -1;
++			
++		data[lDelta] ^= (1 << bit);
++
++		return 1; /* corrected */
++	}
++
++	if ((yaffs_CountBits32(lDelta) + yaffs_CountBits32(lDeltaPrime) +
++	     yaffs_CountBits(cDelta)) == 1) {
++		/* Recoverable error in ECC */
++
++		*read_ecc = *test_ecc;
++		return 1; /* corrected */
++	}
++
++	/* Unrecoverable error */
++
++	return -1;
++
++}
++
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_ecc.h linux-2.6.21.1.new/fs/yaffs2/yaffs_ecc.h
+--- linux-2.6.21.1/fs/yaffs2/yaffs_ecc.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_ecc.h	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,44 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++ /*
++  * This code implements the ECC algorithm used in SmartMedia.
++  *
++  * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes. 
++  * The two unused bits are set to 1.
++  * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC 
++  * blocks are used on a 512-byte NAND page.
++  *
++  */
++
++#ifndef __YAFFS_ECC_H__
++#define __YAFFS_ECC_H__
++
++typedef struct {
++	unsigned char colParity;
++	unsigned lineParity;
++	unsigned lineParityPrime;
++} yaffs_ECCOther;
++
++void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc);
++int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc,
++		     const unsigned char *test_ecc);
++
++void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
++			     yaffs_ECCOther * ecc);
++int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
++			  yaffs_ECCOther * read_ecc,
++			  const yaffs_ECCOther * test_ecc);
++#endif
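A minimal usage sketch of the declarations above: compute the ECC when a 256-byte half of a 512-byte page is written, recompute it on read, and let yaffs_ECCCorrect() repair a single-bit error in place. The wrapper name and buffer layout are illustrative assumptions; the stored ECC normally lives in the NAND spare area.

#include "yaffs_ecc.h"

/* Returns 0 for clean data, 1 if a single-bit error was corrected in place,
 * -1 for an unrecoverable error (same convention as yaffs_ECCCorrect()). */
static int example_verify_half_page(unsigned char data[256],
				    unsigned char stored_ecc[3])
{
	unsigned char fresh_ecc[3];

	yaffs_ECCCalculate(data, fresh_ecc);	/* ECC over the data as read back */
	return yaffs_ECCCorrect(data, stored_ecc, fresh_ecc);
}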
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_fs.c linux-2.6.21.1.new/fs/yaffs2/yaffs_fs.c
+--- linux-2.6.21.1/fs/yaffs2/yaffs_fs.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_fs.c	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,2278 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ * Acknowledgements:
++ * Luc van OostenRyck for numerous patches.
++ * Nick Bane for numerous patches.
++ * Nick Bane for 2.5/2.6 integration.
++ * Andras Toth for mknod rdev issue.
++ * Michael Fischer for finding the problem with inode inconsistency.
++ * Some code bodily lifted from JFFS
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ *
++ * This is the file system front-end to YAFFS that hooks it up to
++ * the VFS.
++ *
++ * Special notes: 
++ * >> 2.4: sb->u.generic_sbp points to the yaffs_Device associated with
++ *         this superblock
++ * >> 2.6: sb->s_fs_info  points to the yaffs_Device associated with this
++ *         superblock
++ * >> inode->u.generic_ip points to the associated yaffs_Object.
++ */
++
++const char *yaffs_fs_c_version =
++    "$Id: yaffs_fs.c,v 1.60 2007-05-15 20:07:40 charles Exp $";
++extern const char *yaffs_guts_c_version;
++
++#include <linux/version.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++#include <linux/config.h>
++#endif
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/smp_lock.h>
++#include <linux/pagemap.h>
++#include <linux/mtd/mtd.h>
++#include <linux/interrupt.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++
++#include <linux/statfs.h>	/* Added NCB 15-8-2003 */
++#include <asm/statfs.h>
++#define UnlockPage(p) unlock_page(p)
++#define Page_Uptodate(page)	test_bit(PG_uptodate, &(page)->flags)
++
++/* FIXME: use sb->s_id instead ? */
++#define yaffs_devname(sb, buf)	bdevname(sb->s_bdev, buf)
++
++#else
++
++#include <linux/locks.h>
++#define	BDEVNAME_SIZE		0
++#define	yaffs_devname(sb, buf)	kdevname(sb->s_dev)
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
++/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
++#define __user
++#endif
++
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++#define WRITE_SIZE_STR "writesize"
++#define WRITE_SIZE(mtd) (mtd)->writesize
++#else
++#define WRITE_SIZE_STR "oobblock"
++#define WRITE_SIZE(mtd) (mtd)->oobblock
++#endif
++
++#include <asm/uaccess.h>
++
++#include "yportenv.h"
++#include "yaffs_guts.h"
++
++#include <linux/mtd/mtd.h>
++#include "yaffs_mtdif.h"
++#include "yaffs_mtdif1.h"
++#include "yaffs_mtdif2.h"
++
++unsigned int yaffs_traceMask = YAFFS_TRACE_BAD_BLOCKS;
++unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
++
++/* Module Parameters */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++module_param(yaffs_traceMask,uint,0644);
++module_param(yaffs_wr_attempts,uint,0644);
++#else
++MODULE_PARM(yaffs_traceMask,"i");
++MODULE_PARM(yaffs_wr_attempts,"i");
++#endif
++
++/*#define T(x) printk x */
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18))
++#define yaffs_InodeToObjectLV(iptr) (iptr)->i_private
++#else
++#define yaffs_InodeToObjectLV(iptr) (iptr)->u.generic_ip
++#endif
++
++#define yaffs_InodeToObject(iptr) ((yaffs_Object *)(yaffs_InodeToObjectLV(iptr)))
++#define yaffs_DentryToObject(dptr) yaffs_InodeToObject((dptr)->d_inode)
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#define yaffs_SuperToDevice(sb)	((yaffs_Device *)sb->s_fs_info)
++#else
++#define yaffs_SuperToDevice(sb)	((yaffs_Device *)sb->u.generic_sbp)
++#endif
++
++static void yaffs_put_super(struct super_block *sb);
++
++static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
++				loff_t * pos);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++static int yaffs_file_flush(struct file *file, fl_owner_t id);
++#else
++static int yaffs_file_flush(struct file *file);
++#endif
++
++static int yaffs_sync_object(struct file *file, struct dentry *dentry,
++			     int datasync);
++
++static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
++			struct nameidata *n);
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
++				   struct nameidata *n);
++#else
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode);
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry);
++#endif
++static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
++		      struct dentry *dentry);
++static int yaffs_unlink(struct inode *dir, struct dentry *dentry);
++static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
++			 const char *symname);
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++		       dev_t dev);
++#else
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++		       int dev);
++#endif
++static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
++			struct inode *new_dir, struct dentry *new_dentry);
++static int yaffs_setattr(struct dentry *dentry, struct iattr *attr);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++static int yaffs_sync_fs(struct super_block *sb, int wait);
++static void yaffs_write_super(struct super_block *sb);
++#else
++static int yaffs_sync_fs(struct super_block *sb);
++static int yaffs_write_super(struct super_block *sb);
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf);
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf);
++#else
++static int yaffs_statfs(struct super_block *sb, struct statfs *buf);
++#endif
++static void yaffs_read_inode(struct inode *inode);
++
++static void yaffs_put_inode(struct inode *inode);
++static void yaffs_delete_inode(struct inode *);
++static void yaffs_clear_inode(struct inode *);
++
++static int yaffs_readpage(struct file *file, struct page *page);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++static int yaffs_writepage(struct page *page, struct writeback_control *wbc);
++#else
++static int yaffs_writepage(struct page *page);
++#endif
++static int yaffs_prepare_write(struct file *f, struct page *pg,
++			       unsigned offset, unsigned to);
++static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
++			      unsigned to);
++
++static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
++			  int buflen);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
++static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
++#else
++static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd);
++#endif
++
++static struct address_space_operations yaffs_file_address_operations = {
++	.readpage = yaffs_readpage,
++	.writepage = yaffs_writepage,
++	.prepare_write = yaffs_prepare_write,
++	.commit_write = yaffs_commit_write,
++};
++
++static struct file_operations yaffs_file_operations = {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18))
++	.read = do_sync_read,
++	.write = do_sync_write,
++	.aio_read = generic_file_aio_read,
++	.aio_write = generic_file_aio_write,
++#else
++	.read = generic_file_read,
++	.write = generic_file_write,
++#endif
++	.mmap = generic_file_mmap,
++	.flush = yaffs_file_flush,
++	.fsync = yaffs_sync_object,
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++	.sendfile = generic_file_sendfile,
++#endif
++
++};
++
++static struct inode_operations yaffs_file_inode_operations = {
++	.setattr = yaffs_setattr,
++};
++
++static struct inode_operations yaffs_symlink_inode_operations = {
++	.readlink = yaffs_readlink,
++	.follow_link = yaffs_follow_link,
++	.setattr = yaffs_setattr,
++};
++
++static struct inode_operations yaffs_dir_inode_operations = {
++	.create = yaffs_create,
++	.lookup = yaffs_lookup,
++	.link = yaffs_link,
++	.unlink = yaffs_unlink,
++	.symlink = yaffs_symlink,
++	.mkdir = yaffs_mkdir,
++	.rmdir = yaffs_unlink,
++	.mknod = yaffs_mknod,
++	.rename = yaffs_rename,
++	.setattr = yaffs_setattr,
++};
++
++static struct file_operations yaffs_dir_operations = {
++	.read = generic_read_dir,
++	.readdir = yaffs_readdir,
++	.fsync = yaffs_sync_object,
++};
++
++static struct super_operations yaffs_super_ops = {
++	.statfs = yaffs_statfs,
++	.read_inode = yaffs_read_inode,
++	.put_inode = yaffs_put_inode,
++	.put_super = yaffs_put_super,
++	.delete_inode = yaffs_delete_inode,
++	.clear_inode = yaffs_clear_inode,
++	.sync_fs = yaffs_sync_fs,
++	.write_super = yaffs_write_super,
++};
++
++static void yaffs_GrossLock(yaffs_Device * dev)
++{
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs locking\n"));
++
++	down(&dev->grossLock);
++}
++
++static void yaffs_GrossUnlock(yaffs_Device * dev)
++{
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs unlocking\n"));
++	up(&dev->grossLock);
++
++}
++
++static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
++			  int buflen)
++{
++	unsigned char *alias;
++	int ret;
++
++	yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
++
++	yaffs_GrossLock(dev);
++
++	alias = yaffs_GetSymlinkAlias(yaffs_DentryToObject(dentry));
++
++	yaffs_GrossUnlock(dev);
++
++	if (!alias)
++		return -ENOMEM;
++
++	ret = vfs_readlink(dentry, buffer, buflen, alias);
++	kfree(alias);
++	return ret;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
++static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
++#else
++static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
++#endif
++{
++	unsigned char *alias;
++	int ret;
++	yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
++
++	yaffs_GrossLock(dev);
++
++	alias = yaffs_GetSymlinkAlias(yaffs_DentryToObject(dentry));
++
++	yaffs_GrossUnlock(dev);
++
++	if (!alias) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	ret = vfs_follow_link(nd, alias);
++	kfree(alias);
++out:
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
++	return ERR_PTR (ret);
++#else
++	return ret;
++#endif
++}
++
++struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
++			      yaffs_Object * obj);
++
++/*
++ * Lookup is used to find objects in the fs
++ */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
++				   struct nameidata *n)
++#else
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
++#endif
++{
++	yaffs_Object *obj;
++	struct inode *inode = NULL;	/* NCB 2.5/2.6 needs NULL here */
++
++	yaffs_Device *dev = yaffs_InodeToObject(dir)->myDev;
++
++	yaffs_GrossLock(dev);
++
++	T(YAFFS_TRACE_OS,
++	  (KERN_DEBUG "yaffs_lookup for %d:%s\n",
++	   yaffs_InodeToObject(dir)->objectId, dentry->d_name.name));
++
++	obj =
++	    yaffs_FindObjectByName(yaffs_InodeToObject(dir),
++				   dentry->d_name.name);
++
++	obj = yaffs_GetEquivalentObject(obj);	/* in case it was a hardlink */
++	
++	/* Can't hold gross lock when calling yaffs_get_inode() */
++	yaffs_GrossUnlock(dev);
++
++	if (obj) {
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG "yaffs_lookup found %d\n", obj->objectId));
++
++		inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
++
++		if (inode) {
++			T(YAFFS_TRACE_OS,
++			  (KERN_DEBUG "yaffs_lookup dentry\n"));
++/* #if 0 asserted by NCB for 2.5/6 compatibility - falls through to
++ * d_add even if NULL inode */
++#if 0
++			/*dget(dentry); // try to solve directory bug */
++			d_add(dentry, inode);
++
++			/* return dentry; */
++			return NULL;
++#endif
++		}
++
++	} else {
++		T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_lookup not found\n"));
++
++	}
++
++/* added NCB for 2.5/6 compatibility - forces add even if inode is
++ * NULL which creates dentry hash */
++	d_add(dentry, inode);
++
++	return NULL;
++	/*      return (ERR_PTR(-EIO)); */
++
++}
++
++/* For now put inode is just for debugging
++ * Put inode is called when the inode **structure** is put.
++ */
++static void yaffs_put_inode(struct inode *inode)
++{
++	T(YAFFS_TRACE_OS,
++	  ("yaffs_put_inode: ino %d, count %d\n", (int)inode->i_ino,
++	   atomic_read(&inode->i_count)));
++
++}
++
++/* clear is called to tell the fs to release any per-inode data it holds */
++static void yaffs_clear_inode(struct inode *inode)
++{
++	yaffs_Object *obj;
++	yaffs_Device *dev;
++
++	obj = yaffs_InodeToObject(inode);
++
++	T(YAFFS_TRACE_OS,
++	  ("yaffs_clear_inode: ino %d, count %d %s\n", (int)inode->i_ino,
++	   atomic_read(&inode->i_count),
++	   obj ? "object exists" : "null object"));
++
++	if (obj) {
++		dev = obj->myDev;
++		yaffs_GrossLock(dev);
++
++		/* Clear the association between the inode and
++		 * the yaffs_Object.
++		 */
++		obj->myInode = NULL;
++		yaffs_InodeToObjectLV(inode) = NULL;
++
++		/* If the object freeing was deferred, then the real
++		 * free happens now.
++		 * This should fix the inode inconsistency problem.
++		 */
++
++		yaffs_HandleDeferedFree(obj);
++
++		yaffs_GrossUnlock(dev);
++	}
++
++}
++
++/* delete is called when the link count is zero and the inode
++ * is put (ie. nobody wants to know about it anymore, time to
++ * delete the file).
++ * NB Must call clear_inode()
++ */
++static void yaffs_delete_inode(struct inode *inode)
++{
++	yaffs_Object *obj = yaffs_InodeToObject(inode);
++	yaffs_Device *dev;
++
++	T(YAFFS_TRACE_OS,
++	  ("yaffs_delete_inode: ino %d, count %d %s\n", (int)inode->i_ino,
++	   atomic_read(&inode->i_count),
++	   obj ? "object exists" : "null object"));
++
++	if (obj) {
++		dev = obj->myDev;
++		yaffs_GrossLock(dev);
++		yaffs_DeleteFile(obj);
++		yaffs_GrossUnlock(dev);
++	}
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
++        truncate_inode_pages (&inode->i_data, 0);
++#endif
++	clear_inode(inode);
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++static int yaffs_file_flush(struct file *file, fl_owner_t id)
++#else
++static int yaffs_file_flush(struct file *file)
++#endif
++{
++	yaffs_Object *obj = yaffs_DentryToObject(file->f_dentry);
++
++	yaffs_Device *dev = obj->myDev;
++
++	T(YAFFS_TRACE_OS,
++	  (KERN_DEBUG "yaffs_file_flush object %d (%s)\n", obj->objectId,
++	   obj->dirty ? "dirty" : "clean"));
++
++	yaffs_GrossLock(dev);
++
++	yaffs_FlushFile(obj, 1);
++
++	yaffs_GrossUnlock(dev);
++
++	return 0;
++}
++
++static int yaffs_readpage_nolock(struct file *f, struct page *pg)
++{
++	/* Lifted from jffs2 */
++
++	yaffs_Object *obj;
++	unsigned char *pg_buf;
++	int ret;
++
++	yaffs_Device *dev;
++
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage at %08x, size %08x\n",
++			   (unsigned)(pg->index << PAGE_CACHE_SHIFT),
++			   (unsigned)PAGE_CACHE_SIZE));
++
++	obj = yaffs_DentryToObject(f->f_dentry);
++
++	dev = obj->myDev;
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++	BUG_ON(!PageLocked(pg));
++#else
++	if (!PageLocked(pg))
++		PAGE_BUG(pg);
++#endif
++
++	pg_buf = kmap(pg);
++	/* FIXME: Can kmap fail? */
++
++	yaffs_GrossLock(dev);
++
++	ret =
++	    yaffs_ReadDataFromFile(obj, pg_buf, pg->index << PAGE_CACHE_SHIFT,
++				   PAGE_CACHE_SIZE);
++
++	yaffs_GrossUnlock(dev);
++
++	if (ret >= 0)
++		ret = 0;
++
++	if (ret) {
++		ClearPageUptodate(pg);
++		SetPageError(pg);
++	} else {
++		SetPageUptodate(pg);
++		ClearPageError(pg);
++	}
++
++	flush_dcache_page(pg);
++	kunmap(pg);
++
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage done\n"));
++	return ret;
++}
++
++static int yaffs_readpage_unlock(struct file *f, struct page *pg)
++{
++	int ret = yaffs_readpage_nolock(f, pg);
++	UnlockPage(pg);
++	return ret;
++}
++
++static int yaffs_readpage(struct file *f, struct page *pg)
++{
++	return yaffs_readpage_unlock(f, pg);
++}
++
++/* writepage inspired by/stolen from smbfs */
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
++#else
++static int yaffs_writepage(struct page *page)
++#endif
++{
++	struct address_space *mapping = page->mapping;
++	loff_t offset = (loff_t) page->index << PAGE_CACHE_SHIFT;
++	struct inode *inode;
++	unsigned long end_index;
++	char *buffer;
++	yaffs_Object *obj;
++	int nWritten = 0;
++	unsigned nBytes;
++
++	if (!mapping)
++		BUG();
++	inode = mapping->host;
++	if (!inode)
++		BUG();
++
++	if (offset > inode->i_size) {
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG
++		   "yaffs_writepage at %08x, inode size = %08x!!!\n",
++		   (unsigned)(page->index << PAGE_CACHE_SHIFT),
++		   (unsigned)inode->i_size));
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG "                -> don't care!!\n"));
++		unlock_page(page);
++		return 0;
++	}
++
++	end_index = inode->i_size >> PAGE_CACHE_SHIFT;
++
++	/* easy case */
++	if (page->index < end_index) {
++		nBytes = PAGE_CACHE_SIZE;
++	} else {
++		nBytes = inode->i_size & (PAGE_CACHE_SIZE - 1);
++	}
++
++	get_page(page);
++
++	buffer = kmap(page);
++
++	obj = yaffs_InodeToObject(inode);
++	yaffs_GrossLock(obj->myDev);
++
++	T(YAFFS_TRACE_OS,
++	  (KERN_DEBUG "yaffs_writepage at %08x, size %08x\n",
++	   (unsigned)(page->index << PAGE_CACHE_SHIFT), nBytes));
++	T(YAFFS_TRACE_OS,
++	  (KERN_DEBUG "writepag0: obj = %05x, ino = %05x\n",
++	   (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
++
++	nWritten =
++	    yaffs_WriteDataToFile(obj, buffer, page->index << PAGE_CACHE_SHIFT,
++				  nBytes, 0);
++
++	T(YAFFS_TRACE_OS,
++	  (KERN_DEBUG "writepag1: obj = %05x, ino = %05x\n",
++	   (int)obj->variant.fileVariant.fileSize, (int)inode->i_size));
++
++	yaffs_GrossUnlock(obj->myDev);
++
++	kunmap(page);
++	SetPageUptodate(page);
++	UnlockPage(page);
++	put_page(page);
++
++	return (nWritten == nBytes) ? 0 : -ENOSPC;
++}
++
++static int yaffs_prepare_write(struct file *f, struct page *pg,
++			       unsigned offset, unsigned to)
++{
++
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_prepare_write\n"));
++	if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
++		return yaffs_readpage_nolock(f, pg);
++
++	return 0;
++
++}
++
++static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
++			      unsigned to)
++{
++
++	void *addr = page_address(pg) + offset;
++	loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
++	int nBytes = to - offset;
++	int nWritten;
++
++	unsigned spos = pos;
++	unsigned saddr = (unsigned)addr;
++
++	T(YAFFS_TRACE_OS,
++	  (KERN_DEBUG "yaffs_commit_write addr %x pos %x nBytes %d\n", saddr,
++	   spos, nBytes));
++
++	nWritten = yaffs_file_write(f, addr, nBytes, &pos);
++
++	if (nWritten != nBytes) {
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG
++		   "yaffs_commit_write not same size nWritten %d  nBytes %d\n",
++		   nWritten, nBytes));
++		SetPageError(pg);
++		ClearPageUptodate(pg);
++	} else {
++		SetPageUptodate(pg);
++	}
++
++	T(YAFFS_TRACE_OS,
++	  (KERN_DEBUG "yaffs_commit_write returning %d\n",
++	   nWritten == nBytes ? 0 : nWritten));
++
++	return nWritten == nBytes ? 0 : nWritten;
++
++}
++
++static void yaffs_FillInodeFromObject(struct inode *inode, yaffs_Object * obj)
++{
++	if (inode && obj) {
++
++
++		/* Check mode against the variant type and attempt to repair if broken. */
++ 		__u32 mode = obj->yst_mode;
++ 		switch( obj->variantType ){
++ 		case YAFFS_OBJECT_TYPE_FILE :
++ 		        if( ! S_ISREG(mode) ){
++ 			        obj->yst_mode &= ~S_IFMT;
++ 			        obj->yst_mode |= S_IFREG;
++ 			}
++ 
++ 			break;
++ 		case YAFFS_OBJECT_TYPE_SYMLINK :
++ 		        if( ! S_ISLNK(mode) ){
++ 			        obj->yst_mode &= ~S_IFMT;
++ 				obj->yst_mode |= S_IFLNK;
++ 			}
++ 
++ 			break;
++ 		case YAFFS_OBJECT_TYPE_DIRECTORY :
++ 		        if( ! S_ISDIR(mode) ){
++ 			        obj->yst_mode &= ~S_IFMT;
++ 			        obj->yst_mode |= S_IFDIR;
++ 			}
++ 
++ 			break;
++ 		case YAFFS_OBJECT_TYPE_UNKNOWN :
++ 		case YAFFS_OBJECT_TYPE_HARDLINK :
++ 		case YAFFS_OBJECT_TYPE_SPECIAL :
++ 		default:
++ 		        /* TODO? */
++ 		        break;
++ 		}
++
++		inode->i_ino = obj->objectId;
++		inode->i_mode = obj->yst_mode;
++		inode->i_uid = obj->yst_uid;
++		inode->i_gid = obj->yst_gid;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++		inode->i_blksize = inode->i_sb->s_blocksize;
++#endif
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++
++		inode->i_rdev = old_decode_dev(obj->yst_rdev);
++		inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
++		inode->i_atime.tv_nsec = 0;
++		inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
++		inode->i_mtime.tv_nsec = 0;
++		inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
++		inode->i_ctime.tv_nsec = 0;
++#else
++		inode->i_rdev = obj->yst_rdev;
++		inode->i_atime = obj->yst_atime;
++		inode->i_mtime = obj->yst_mtime;
++		inode->i_ctime = obj->yst_ctime;
++#endif
++		inode->i_size = yaffs_GetObjectFileLength(obj);
++		inode->i_blocks = (inode->i_size + 511) >> 9;
++
++		inode->i_nlink = yaffs_GetObjectLinkCount(obj);
++
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG
++		   "yaffs_FillInode mode %x uid %d gid %d size %d count %d\n",
++		   inode->i_mode, inode->i_uid, inode->i_gid,
++		   (int)inode->i_size, atomic_read(&inode->i_count)));
++
++		switch (obj->yst_mode & S_IFMT) {
++		default:	/* fifo, device or socket */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++			init_special_inode(inode, obj->yst_mode,
++					   old_decode_dev(obj->yst_rdev));
++#else
++			init_special_inode(inode, obj->yst_mode,
++					   (dev_t) (obj->yst_rdev));
++#endif
++			break;
++		case S_IFREG:	/* file */
++			inode->i_op = &yaffs_file_inode_operations;
++			inode->i_fop = &yaffs_file_operations;
++			inode->i_mapping->a_ops =
++			    &yaffs_file_address_operations;
++			break;
++		case S_IFDIR:	/* directory */
++			inode->i_op = &yaffs_dir_inode_operations;
++			inode->i_fop = &yaffs_dir_operations;
++			break;
++		case S_IFLNK:	/* symlink */
++			inode->i_op = &yaffs_symlink_inode_operations;
++			break;
++		}
++
++		yaffs_InodeToObjectLV(inode) = obj;
++
++		obj->myInode = inode;
++
++	} else {
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG "yaffs_FillInode invalid parameters\n"));
++	}
++
++}
++
++struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
++			      yaffs_Object * obj)
++{
++	struct inode *inode;
++
++	if (!sb) {
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG "yaffs_get_inode for NULL super_block!!\n"));
++		return NULL;
++
++	}
++
++	if (!obj) {
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG "yaffs_get_inode for NULL object!!\n"));
++		return NULL;
++
++	}
++
++	T(YAFFS_TRACE_OS,
++	  (KERN_DEBUG "yaffs_get_inode for object %d\n", obj->objectId));
++
++	inode = iget(sb, obj->objectId);
++
++	/* NB Side effect: iget calls back to yaffs_read_inode(). */
++	/* iget also increments the inode's i_count */
++	/* NB You can't be holding grossLock or deadlock will happen! */
++
++	return inode;
++}
++
++static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
++				loff_t * pos)
++{
++	yaffs_Object *obj;
++	int nWritten, ipos;
++	struct inode *inode;
++	yaffs_Device *dev;
++
++	obj = yaffs_DentryToObject(f->f_dentry);
++
++	dev = obj->myDev;
++
++	yaffs_GrossLock(dev);
++
++	inode = f->f_dentry->d_inode;
++
++	if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND) {
++		ipos = inode->i_size;
++	} else {
++		ipos = *pos;
++	}
++
++	if (!obj) {
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG "yaffs_file_write: hey obj is null!\n"));
++	} else {
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG
++		   "yaffs_file_write: about to write %d bytes "
++		   "to object %d at %d\n",
++		   n, obj->objectId, ipos));
++	}
++
++	nWritten = yaffs_WriteDataToFile(obj, buf, ipos, n, 0);
++
++	T(YAFFS_TRACE_OS,
++	  (KERN_DEBUG "yaffs_file_write writing %d bytes, %d written at %d\n",
++	   n, nWritten, ipos));
++	if (nWritten > 0) {
++		ipos += nWritten;
++		*pos = ipos;
++		if (ipos > inode->i_size) {
++			inode->i_size = ipos;
++			inode->i_blocks = (ipos + 511) >> 9;
++
++			T(YAFFS_TRACE_OS,
++			  (KERN_DEBUG
++			   "yaffs_file_write size updated to %d bytes, "
++			   "%d blocks\n",
++			   ipos, (int)(inode->i_blocks)));
++		}
++
++	}
++	yaffs_GrossUnlock(dev);
++	return nWritten == 0 ? -ENOSPC : nWritten;
++}
++
++static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
++{
++	yaffs_Object *obj;
++	yaffs_Device *dev;
++	struct inode *inode = f->f_dentry->d_inode;
++	unsigned long offset, curoffs;
++	struct list_head *i;
++	yaffs_Object *l;
++
++	char name[YAFFS_MAX_NAME_LENGTH + 1];
++
++	obj = yaffs_DentryToObject(f->f_dentry);
++	dev = obj->myDev;
++
++	yaffs_GrossLock(dev);
++
++	offset = f->f_pos;
++
++	T(YAFFS_TRACE_OS, ("yaffs_readdir: starting at %d\n", (int)offset));
++
++	if (offset == 0) {
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG "yaffs_readdir: entry . ino %d \n",
++		   (int)inode->i_ino));
++		if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR)
++		    < 0) {
++			goto out;
++		}
++		offset++;
++		f->f_pos++;
++	}
++	if (offset == 1) {
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG "yaffs_readdir: entry .. ino %d \n",
++		   (int)f->f_dentry->d_parent->d_inode->i_ino));
++		if (filldir
++		    (dirent, "..", 2, offset,
++		     f->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) {
++			goto out;
++		}
++		offset++;
++		f->f_pos++;
++	}
++
++	curoffs = 1;
++
++	/* If the directory has changed since the open or last call to
++	   readdir, rewind to after the 2 canned entries. */
++
++	if (f->f_version != inode->i_version) {
++		offset = 2;
++		f->f_pos = offset;
++		f->f_version = inode->i_version;
++	}
++
++	list_for_each(i, &obj->variant.directoryVariant.children) {
++		curoffs++;
++		if (curoffs >= offset) {
++			l = list_entry(i, yaffs_Object, siblings);
++
++			yaffs_GetObjectName(l, name,
++					    YAFFS_MAX_NAME_LENGTH + 1);
++			T(YAFFS_TRACE_OS,
++			  (KERN_DEBUG "yaffs_readdir: %s inode %d\n", name,
++			   yaffs_GetObjectInode(l)));
++
++			if (filldir(dirent,
++				    name,
++				    strlen(name),
++				    offset,
++				    yaffs_GetObjectInode(l),
++				    yaffs_GetObjectType(l))
++			    < 0) {
++				goto up_and_out;
++			}
++
++			offset++;
++			f->f_pos++;
++		}
++	}
++
++      up_and_out:
++      out:
++
++	yaffs_GrossUnlock(dev);
++
++	return 0;
++}
++
++/*
++ * File creation. Allocate an inode, and we're done.
++ */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++		       dev_t rdev)
++#else
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++		       int rdev)
++#endif
++{
++	struct inode *inode;
++
++	yaffs_Object *obj = NULL;
++	yaffs_Device *dev;
++
++	yaffs_Object *parent = yaffs_InodeToObject(dir);
++
++	int error = -ENOSPC;
++	uid_t uid = current->fsuid;
++	gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
++	
++	if((dir->i_mode & S_ISGID) && S_ISDIR(mode))
++		mode |= S_ISGID;
++
++	if (parent) {
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG "yaffs_mknod: parent object %d type %d\n",
++		   parent->objectId, parent->variantType));
++	} else {
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG "yaffs_mknod: could not get parent object\n"));
++		return -EPERM;
++	}
++
++	T(YAFFS_TRACE_OS, ("yaffs_mknod: making object for %s, "
++			   "mode %x dev %x\n",
++			   dentry->d_name.name, mode, rdev));
++
++	dev = parent->myDev;
++
++	yaffs_GrossLock(dev);
++
++	switch (mode & S_IFMT) {
++	default:
++		/* Special (socket, fifo, device...) */
++		T(YAFFS_TRACE_OS, (KERN_DEBUG
++				   "yaffs_mknod: making special\n"));
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++		obj =
++		    yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
++				       gid, old_encode_dev(rdev));
++#else
++		obj =
++		    yaffs_MknodSpecial(parent, dentry->d_name.name, mode, uid,
++				       gid, rdev);
++#endif
++		break;
++	case S_IFREG:		/* file          */
++		T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_mknod: making file\n"));
++		obj =
++		    yaffs_MknodFile(parent, dentry->d_name.name, mode, uid,
++				    gid);
++		break;
++	case S_IFDIR:		/* directory */
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG "yaffs_mknod: making directory\n"));
++		obj =
++		    yaffs_MknodDirectory(parent, dentry->d_name.name, mode,
++					 uid, gid);
++		break;
++	case S_IFLNK:		/* symlink */
++		T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_mknod: making symlink\n"));
++		obj = NULL;	/* Do we ever get here? */
++		break;
++	}
++	
++	/* Can not call yaffs_get_inode() with gross lock held */
++	yaffs_GrossUnlock(dev);
++
++	if (obj) {
++		inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
++		d_instantiate(dentry, inode);
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG "yaffs_mknod created object %d count = %d\n",
++		   obj->objectId, atomic_read(&inode->i_count)));
++		error = 0;
++	} else {
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG "yaffs_mknod failed making object\n"));
++		error = -ENOMEM;
++	}
++
++	return error;
++}
++
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++{
++	int retVal;
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_mkdir\n"));
++	retVal = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
++#if 0
++	/* attempt to fix dir bug - didn't work */
++	if (!retVal) {
++		dget(dentry);
++	}
++#endif
++	return retVal;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
++			struct nameidata *n)
++#else
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
++#endif
++{
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_create\n"));
++	return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
++}
++
++static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
++{
++	int retVal;
++
++	yaffs_Device *dev;
++
++	T(YAFFS_TRACE_OS,
++	  (KERN_DEBUG "yaffs_unlink %d:%s\n", (int)(dir->i_ino),
++	   dentry->d_name.name));
++
++	dev = yaffs_InodeToObject(dir)->myDev;
++
++	yaffs_GrossLock(dev);
++
++	retVal = yaffs_Unlink(yaffs_InodeToObject(dir), dentry->d_name.name);
++
++	if (retVal == YAFFS_OK) {
++		dentry->d_inode->i_nlink--;
++		dir->i_version++;
++		yaffs_GrossUnlock(dev);
++		mark_inode_dirty(dentry->d_inode);
++		return 0;
++	}
++	yaffs_GrossUnlock(dev);
++	return -ENOTEMPTY;
++}
++
++/*
++ * Create a link...
++ */
++static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
++		      struct dentry *dentry)
++{
++	struct inode *inode = old_dentry->d_inode;
++	yaffs_Object *obj = NULL;
++	yaffs_Object *link = NULL;
++	yaffs_Device *dev;
++
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_link\n"));
++
++	obj = yaffs_InodeToObject(inode);
++	dev = obj->myDev;
++
++	yaffs_GrossLock(dev);
++
++	if (!S_ISDIR(inode->i_mode))	/* Don't link directories */
++	{
++		link =
++		    yaffs_Link(yaffs_InodeToObject(dir), dentry->d_name.name,
++			       obj);
++	}
++
++	if (link) {
++		old_dentry->d_inode->i_nlink = yaffs_GetObjectLinkCount(obj);
++		d_instantiate(dentry, old_dentry->d_inode);
++		atomic_inc(&old_dentry->d_inode->i_count);
++		T(YAFFS_TRACE_OS,
++		  (KERN_DEBUG "yaffs_link link count %d i_count %d\n",
++		   old_dentry->d_inode->i_nlink,
++		   atomic_read(&old_dentry->d_inode->i_count)));
++
++	}
++
++	yaffs_GrossUnlock(dev);
++
++	if (link) {
++
++		return 0;
++	}
++
++	return -EPERM;
++}
++
++static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
++			 const char *symname)
++{
++	yaffs_Object *obj;
++	yaffs_Device *dev;
++	uid_t uid = current->fsuid;
++	gid_t gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid;
++
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_symlink\n"));
++
++	dev = yaffs_InodeToObject(dir)->myDev;
++	yaffs_GrossLock(dev);
++	obj = yaffs_MknodSymLink(yaffs_InodeToObject(dir), dentry->d_name.name,
++				 S_IFLNK | S_IRWXUGO, uid, gid, symname);
++	yaffs_GrossUnlock(dev);
++
++	if (obj) {
++
++		struct inode *inode;
++
++		inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
++		d_instantiate(dentry, inode);
++		T(YAFFS_TRACE_OS, (KERN_DEBUG "symlink created OK\n"));
++		return 0;
++	} else {
++		T(YAFFS_TRACE_OS, (KERN_DEBUG "symlink not created\n"));
++
++	}
++
++	return -ENOMEM;
++}
++
++static int yaffs_sync_object(struct file *file, struct dentry *dentry,
++			     int datasync)
++{
++
++	yaffs_Object *obj;
++	yaffs_Device *dev;
++
++	obj = yaffs_DentryToObject(dentry);
++
++	dev = obj->myDev;
++
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_sync_object\n"));
++	yaffs_GrossLock(dev);
++	yaffs_FlushFile(obj, 1);
++	yaffs_GrossUnlock(dev);
++	return 0;
++}
++
++/*
++ * The VFS layer already does all the dentry stuff for rename.
++ *
++ * NB: POSIX says you can rename an object over an old object of the same name
++ */
++static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
++			struct inode *new_dir, struct dentry *new_dentry)
++{
++	yaffs_Device *dev;
++	int retVal = YAFFS_FAIL;
++	yaffs_Object *target;
++
++        T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_rename\n"));
++	dev = yaffs_InodeToObject(old_dir)->myDev;
++
++	yaffs_GrossLock(dev);
++
++	/* Check if the target is an existing directory that is not empty. */
++	target =
++	    yaffs_FindObjectByName(yaffs_InodeToObject(new_dir),
++				   new_dentry->d_name.name);
++	
++	
++
++	if (target &&
++	    target->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
++	    !list_empty(&target->variant.directoryVariant.children)) {
++	    
++	        T(YAFFS_TRACE_OS, (KERN_DEBUG "target is non-empty dir\n"));
++
++		retVal = YAFFS_FAIL;
++	} else {
++
++		/* Now does unlinking internally using shadowing mechanism */
++	        T(YAFFS_TRACE_OS, (KERN_DEBUG "calling yaffs_RenameObject\n"));
++		
++		retVal =
++		    yaffs_RenameObject(yaffs_InodeToObject(old_dir),
++				       old_dentry->d_name.name,
++				       yaffs_InodeToObject(new_dir),
++				       new_dentry->d_name.name);
++
++	}
++	yaffs_GrossUnlock(dev);
++
++	if (retVal == YAFFS_OK) {
++		if(target) {
++			new_dentry->d_inode->i_nlink--;
++			mark_inode_dirty(new_dentry->d_inode);
++		}
++
++		return 0;
++	} else {
++		return -ENOTEMPTY;
++	}
++
++}
++
++static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
++{
++	struct inode *inode = dentry->d_inode;
++	int error;
++	yaffs_Device *dev;
++
++	T(YAFFS_TRACE_OS,
++	  (KERN_DEBUG "yaffs_setattr of object %d\n",
++	   yaffs_InodeToObject(inode)->objectId));
++
++	if ((error = inode_change_ok(inode, attr)) == 0) {
++
++		dev = yaffs_InodeToObject(inode)->myDev;
++		yaffs_GrossLock(dev);
++		if (yaffs_SetAttributes(yaffs_InodeToObject(inode), attr) ==
++		    YAFFS_OK) {
++			error = 0;
++		} else {
++			error = -EPERM;
++		}
++		yaffs_GrossUnlock(dev);
++		if (!error)
++			error = inode_setattr(inode, attr);
++	}
++	return error;
++}
++
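++/*
++ * statfs reports sizes in units of sb->s_blocksize, so the chunk counts
++ * below are rescaled.  Illustrative example (assuming a 4096 byte
++ * blocksize and 2048 byte data chunks): a device spanning 1024 blocks of
++ * 64 chunks each is reported as 1024 * 64 / (4096 / 2048) = 32768 f_blocks.
++ */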
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++	yaffs_Device *dev = yaffs_DentryToObject(dentry)->myDev;
++	struct super_block *sb = dentry->d_sb;
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
++{
++	yaffs_Device *dev = yaffs_SuperToDevice(sb);
++#else
++static int yaffs_statfs(struct super_block *sb, struct statfs *buf)
++{
++	yaffs_Device *dev = yaffs_SuperToDevice(sb);
++#endif
++
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_statfs\n"));
++
++	yaffs_GrossLock(dev);
++
++	buf->f_type = YAFFS_MAGIC;
++	buf->f_bsize = sb->s_blocksize;
++	buf->f_namelen = 255;
++	if (sb->s_blocksize > dev->nDataBytesPerChunk) {
++
++		buf->f_blocks =
++		    (dev->endBlock - dev->startBlock +
++		     1) * dev->nChunksPerBlock / (sb->s_blocksize /
++						  dev->nDataBytesPerChunk);
++		buf->f_bfree =
++		    yaffs_GetNumberOfFreeChunks(dev) / (sb->s_blocksize /
++							dev->nDataBytesPerChunk);
++	} else {
++
++		buf->f_blocks =
++		    (dev->endBlock - dev->startBlock +
++		     1) * dev->nChunksPerBlock * (dev->nDataBytesPerChunk /
++						  sb->s_blocksize);
++		buf->f_bfree =
++		    yaffs_GetNumberOfFreeChunks(dev) * (dev->nDataBytesPerChunk /
++							sb->s_blocksize);
++	}
++	buf->f_files = 0;
++	buf->f_ffree = 0;
++	buf->f_bavail = buf->f_bfree;
++
++	yaffs_GrossUnlock(dev);
++	return 0;
++}
++
++
++/**
++static int yaffs_do_sync_fs(struct super_block *sb)
++{
++
++	yaffs_Device *dev = yaffs_SuperToDevice(sb);
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_do_sync_fs\n"));
++
++	if(sb->s_dirt) {
++		yaffs_GrossLock(dev);
++
++		if(dev)
++			yaffs_CheckpointSave(dev);
++		
++		yaffs_GrossUnlock(dev);
++
++		sb->s_dirt = 0;
++	}
++	return 0;
++}
++**/
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++static void yaffs_write_super(struct super_block *sb)
++#else
++static int yaffs_write_super(struct super_block *sb)
++#endif
++{
++
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_write_super\n"));
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
++	return 0; /* yaffs_do_sync_fs(sb);*/
++#endif
++}
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++static int yaffs_sync_fs(struct super_block *sb, int wait)
++#else
++static int yaffs_sync_fs(struct super_block *sb)
++#endif
++{
++
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_sync_fs\n"));
++	
++	return 0; /* yaffs_do_sync_fs(sb);*/
++	
++}
++
++
++static void yaffs_read_inode(struct inode *inode)
++{
++	/* NB This is called as a side effect of other functions, but
++	 * we had to release the lock to prevent deadlocks, so 
++	 * need to lock again.
++	 */
++
++	yaffs_Object *obj;
++	yaffs_Device *dev = yaffs_SuperToDevice(inode->i_sb);
++
++	T(YAFFS_TRACE_OS,
++	  (KERN_DEBUG "yaffs_read_inode for %d\n", (int)inode->i_ino));
++
++	yaffs_GrossLock(dev);
++	
++	obj = yaffs_FindObjectByNumber(dev, inode->i_ino);
++
++	yaffs_FillInodeFromObject(inode, obj);
++
++	yaffs_GrossUnlock(dev);
++}
++
++static LIST_HEAD(yaffs_dev_list);
++
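++/*
++ * Remounting read-only flushes the device's cached data and saves a
++ * checkpoint; yaffs can use that checkpoint on a later mount to restore
++ * state without rescanning the whole device (unless checkpointing was
++ * disabled via the mount options).
++ */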
++static int yaffs_remount_fs(struct super_block *sb, int *flags, char *data)
++{
++	yaffs_Device    *dev = yaffs_SuperToDevice(sb);
++
++	if( *flags & MS_RDONLY ) {
++		struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice;
++	    
++		T(YAFFS_TRACE_OS,
++			(KERN_DEBUG "yaffs_remount_fs: %s: RO\n", dev->name ));
++
++		yaffs_GrossLock(dev);
++     	 
++		yaffs_FlushEntireDeviceCache(dev);
++    	
++		yaffs_CheckpointSave(dev);
++ 
++		if (mtd->sync)
++			mtd->sync(mtd);
++
++		yaffs_GrossUnlock(dev);
++	}
++	else {
++		T(YAFFS_TRACE_OS, 
++			(KERN_DEBUG "yaffs_remount_fs: %s: RW\n", dev->name ));
++	}
++ 
++	return 0;
++}
++
++static void yaffs_put_super(struct super_block *sb)
++{
++	yaffs_Device *dev = yaffs_SuperToDevice(sb);
++
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_put_super\n"));
++
++	yaffs_GrossLock(dev);
++	
++	yaffs_FlushEntireDeviceCache(dev);
++
++	yaffs_CheckpointSave(dev);
++
++	if (dev->putSuperFunc) {
++		dev->putSuperFunc(sb);
++	}
++
++	yaffs_Deinitialise(dev);
++	
++	yaffs_GrossUnlock(dev);
++
++	/* we assume this is protected by lock_kernel() in mount/umount */
++	list_del(&dev->devList);
++	
++	if(dev->spareBuffer){
++		YFREE(dev->spareBuffer);
++		dev->spareBuffer = NULL;
++	}
++
++	kfree(dev);
++}
++
++
++static void yaffs_MTDPutSuper(struct super_block *sb)
++{
++
++	struct mtd_info *mtd = yaffs_SuperToDevice(sb)->genericDevice;
++
++	if (mtd->sync) {
++		mtd->sync(mtd);
++	}
++
++	put_mtd_device(mtd);
++}
++
++
++static void yaffs_MarkSuperBlockDirty(void *vsb)
++{
++	struct super_block *sb = (struct super_block *)vsb;
++	
++	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_MarkSuperBlockDirty() sb = %p\n",sb));
++//	if(sb)
++//		sb->s_dirt = 1;
++}
++
++typedef struct {
++	int inband_tags;
++	int skip_checkpoint_read;
++	int skip_checkpoint_write;
++	int no_cache;
++} yaffs_options;
++
++#define MAX_OPT_LEN 20
++static int yaffs_parse_options(yaffs_options *options, const char *options_str)
++{
++	char cur_opt[MAX_OPT_LEN+1];
++	int p;
++	int error = 0;
++	
++	/* Parse through the options, which are a comma-separated list */
++	
++	while(options_str && *options_str && !error){
++		memset(cur_opt,0,MAX_OPT_LEN+1);
++		p = 0;
++		
++		while(*options_str && *options_str != ','){
++			if(p < MAX_OPT_LEN){
++				cur_opt[p] = *options_str;
++				p++;
++			}
++			options_str++;
++		}
++		
++		if(!strcmp(cur_opt,"inband-tags"))
++			options->inband_tags = 1;
++		else if(!strcmp(cur_opt,"no-cache"))
++			options->no_cache = 1;
++		else if(!strcmp(cur_opt,"no-checkpoint-read"))
++			options->skip_checkpoint_read = 1;
++		else if(!strcmp(cur_opt,"no-checkpoint-write"))
++			options->skip_checkpoint_write = 1;
++		else if(!strcmp(cur_opt,"no-checkpoint")){
++			options->skip_checkpoint_read = 1;
++			options->skip_checkpoint_write = 1;
++		} else {
++			printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",cur_opt);
++			error = 1;
++		}
++		
++	}
++
++	return error;
++}
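++
++/*
++ * The option string is the standard mount data, so usage is e.g.
++ * (illustrative, mtdblockN is a placeholder):
++ *
++ *     mount -t yaffs2 -o inband-tags,no-checkpoint /dev/mtdblockN /mnt/nand
++ *
++ * Any option not recognised above makes yaffs_parse_options() return an
++ * error and the mount fails.
++ */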
++
++static struct super_block *yaffs_internal_read_super(int yaffsVersion,
++						     struct super_block *sb,
++						     void *data, int silent)
++{
++	int nBlocks;
++	struct inode *inode = NULL;
++	struct dentry *root;
++	yaffs_Device *dev = 0;
++	char devname_buf[BDEVNAME_SIZE + 1];
++	struct mtd_info *mtd;
++	int err;
++	char *data_str = (char *)data;
++	
++	yaffs_options options;
++
++	sb->s_magic = YAFFS_MAGIC;
++	sb->s_op = &yaffs_super_ops;
++
++	if (!sb)
++		printk(KERN_INFO "yaffs: sb is NULL\n");
++	else if (!sb->s_dev)
++		printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
++	else if (!yaffs_devname(sb, devname_buf))
++		printk(KERN_INFO "yaffs: devname is NULL\n");
++	else
++		printk(KERN_INFO "yaffs: dev is %d name is \"%s\"\n",
++		       sb->s_dev,
++		       yaffs_devname(sb, devname_buf));
++		    
++	if(!data_str)
++		data_str = "";
++   
++	printk(KERN_INFO "yaffs: passed flags \"%s\"\n",data_str);
++	
++	memset(&options,0,sizeof(options));
++	
++	if(yaffs_parse_options(&options,data_str)){
++		/* Option parsing failed */
++		return NULL;
++	}
++
++
++	sb->s_blocksize = PAGE_CACHE_SIZE;
++	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
++	T(YAFFS_TRACE_OS, ("yaffs_read_super: Using yaffs%d\n", yaffsVersion));
++	T(YAFFS_TRACE_OS,
++	  ("yaffs_read_super: block size %d\n", (int)(sb->s_blocksize)));
++
++#ifdef CONFIG_YAFFS_DISABLE_WRITE_VERIFY
++	T(YAFFS_TRACE_OS,
++	  ("yaffs: Write verification disabled. All guarantees "
++	   "null and void\n"));
++#endif
++
++	T(YAFFS_TRACE_ALWAYS, ("yaffs: Attempting MTD mount on %u.%u, "
++			       "\"%s\"\n",
++			       MAJOR(sb->s_dev), MINOR(sb->s_dev),
++			       yaffs_devname(sb, devname_buf)));
++
++	/* Check it's an mtd device..... */
++	if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR) {
++		return NULL;	/* This isn't an mtd device */
++	}
++	/* Get the device */
++	mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
++	if (!mtd) {
++		T(YAFFS_TRACE_ALWAYS,
++		  ("yaffs: MTD device #%u doesn't appear to exist\n",
++		   MINOR(sb->s_dev)));
++		return NULL;
++	}
++	/* Check it's NAND */
++	if (mtd->type != MTD_NANDFLASH) {
++		T(YAFFS_TRACE_ALWAYS,
++		  ("yaffs: MTD device is not NAND, its type is %d\n", mtd->type));
++		return NULL;
++	}
++
++	T(YAFFS_TRACE_OS, (" erase %p\n", mtd->erase));
++	T(YAFFS_TRACE_OS, (" read %p\n", mtd->read));
++	T(YAFFS_TRACE_OS, (" write %p\n", mtd->write));
++	T(YAFFS_TRACE_OS, (" readoob %p\n", mtd->read_oob));
++	T(YAFFS_TRACE_OS, (" writeoob %p\n", mtd->write_oob));
++	T(YAFFS_TRACE_OS, (" block_isbad %p\n", mtd->block_isbad));
++	T(YAFFS_TRACE_OS, (" block_markbad %p\n", mtd->block_markbad));
++	T(YAFFS_TRACE_OS, (" %s %d\n", WRITE_SIZE_STR, WRITE_SIZE(mtd)));
++	T(YAFFS_TRACE_OS, (" oobsize %d\n", mtd->oobsize));
++	T(YAFFS_TRACE_OS, (" erasesize %d\n", mtd->erasesize));
++	T(YAFFS_TRACE_OS, (" size %d\n", mtd->size));
++	
++#ifdef CONFIG_YAFFS_AUTO_YAFFS2
++
++	if (yaffsVersion == 1 && 
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++	    mtd->writesize >= 2048) {
++#else
++	    mtd->oobblock >= 2048) {
++#endif
++	    T(YAFFS_TRACE_ALWAYS,("yaffs: auto selecting yaffs2\n"));
++	    yaffsVersion = 2;
++	}	
++	
++	/* Added NCB 26/5/2006 for completeness */
++	if (yaffsVersion == 2 && 
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++	    mtd->writesize == 512) {
++#else
++	    mtd->oobblock == 512) {
++#endif
++	    T(YAFFS_TRACE_ALWAYS,("yaffs: auto selecting yaffs1\n"));
++	    yaffsVersion = 1;
++	}	
++
++#endif
++
++	if (yaffsVersion == 2) {
++		/* Check for version 2 style functions */
++		if (!mtd->erase ||
++		    !mtd->block_isbad ||
++		    !mtd->block_markbad ||
++		    !mtd->read ||
++		    !mtd->write ||
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++		    !mtd->read_oob || !mtd->write_oob) {
++#else
++		    !mtd->write_ecc ||
++		    !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
++#endif
++			T(YAFFS_TRACE_ALWAYS,
++			  ("yaffs: MTD device does not support required "
++			   "functions\n"));
++			return NULL;
++		}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++		if (mtd->writesize < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
++#else
++		if (mtd->oobblock < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
++#endif
++		    mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) {
++			T(YAFFS_TRACE_ALWAYS,
++			  ("yaffs: MTD device does not have the "
++			   "right page sizes\n"));
++			return NULL;
++		}
++	} else {
++		/* Check for V1 style functions */
++		if (!mtd->erase ||
++		    !mtd->read ||
++		    !mtd->write ||
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++		    !mtd->read_oob || !mtd->write_oob) {
++#else
++		    !mtd->write_ecc ||
++		    !mtd->read_ecc || !mtd->read_oob || !mtd->write_oob) {
++#endif
++			T(YAFFS_TRACE_ALWAYS,
++			  ("yaffs: MTD device does not support required "
++			   "functions\n"));
++			return NULL;
++		}
++
++		if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
++		    mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
++			T(YAFFS_TRACE_ALWAYS,
++			  ("yaffs: MTD device does not have the "
++			   "right page sizes\n"));
++			return NULL;
++		}
++	}
++
++	/* OK, so if we got here, we have an MTD that's NAND and looks
++	 * like it has the right capabilities
++	 * Set the yaffs_Device up for mtd
++	 */
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++	sb->s_fs_info = dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL);
++#else
++	sb->u.generic_sbp = dev = kmalloc(sizeof(yaffs_Device), GFP_KERNEL);
++#endif
++	if (!dev) {
++		/* Deep shit: could not allocate device structure */
++		T(YAFFS_TRACE_ALWAYS,
++		  ("yaffs_read_super: Failed trying to allocate "
++		   "yaffs_Device. \n"));
++		return NULL;
++	}
++
++	memset(dev, 0, sizeof(yaffs_Device));
++	dev->genericDevice = mtd;
++	dev->name = mtd->name;
++
++	/* Set up the memory size parameters.... */
++
++	nBlocks = mtd->size / (YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK);
++	dev->startBlock = 0;
++	dev->endBlock = nBlocks - 1;
++	dev->nChunksPerBlock = YAFFS_CHUNKS_PER_BLOCK;
++	dev->nDataBytesPerChunk = YAFFS_BYTES_PER_CHUNK;
++	dev->nReservedBlocks = 5;
++	dev->nShortOpCaches = (options.no_cache) ? 0 : 10;
++
++	/* ... and the functions. */
++	if (yaffsVersion == 2) {
++		dev->writeChunkWithTagsToNAND =
++		    nandmtd2_WriteChunkWithTagsToNAND;
++		dev->readChunkWithTagsFromNAND =
++		    nandmtd2_ReadChunkWithTagsFromNAND;
++		dev->markNANDBlockBad = nandmtd2_MarkNANDBlockBad;
++		dev->queryNANDBlock = nandmtd2_QueryNANDBlock;
++		dev->spareBuffer = YMALLOC(mtd->oobsize);
++		dev->isYaffs2 = 1;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++		dev->nDataBytesPerChunk = mtd->writesize;
++		dev->nChunksPerBlock = mtd->erasesize / mtd->writesize;
++#else
++		dev->nDataBytesPerChunk = mtd->oobblock;
++		dev->nChunksPerBlock = mtd->erasesize / mtd->oobblock;
++#endif
++		nBlocks = mtd->size / mtd->erasesize;
++
++		dev->nCheckpointReservedBlocks = CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS;
++		dev->startBlock = 0;
++		dev->endBlock = nBlocks - 1;
++	} else {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++		/* use the MTD interface in yaffs_mtdif1.c */
++		dev->writeChunkWithTagsToNAND =
++			nandmtd1_WriteChunkWithTagsToNAND;
++		dev->readChunkWithTagsFromNAND =
++			nandmtd1_ReadChunkWithTagsFromNAND;
++		dev->markNANDBlockBad = nandmtd1_MarkNANDBlockBad;
++		dev->queryNANDBlock = nandmtd1_QueryNANDBlock;
++#else
++		dev->writeChunkToNAND = nandmtd_WriteChunkToNAND;
++		dev->readChunkFromNAND = nandmtd_ReadChunkFromNAND;
++#endif
++		dev->isYaffs2 = 0;
++	}
++	/* ... and common functions */
++	dev->eraseBlockInNAND = nandmtd_EraseBlockInNAND;
++	dev->initialiseNAND = nandmtd_InitialiseNAND;
++
++	dev->putSuperFunc = yaffs_MTDPutSuper;
++	
++	dev->superBlock = (void *)sb;
++	dev->markSuperBlockDirty = yaffs_MarkSuperBlockDirty;
++	
++
++#ifndef CONFIG_YAFFS_DOES_ECC
++	dev->useNANDECC = 1;
++#endif
++
++#ifdef CONFIG_YAFFS_DISABLE_WIDE_TNODES
++	dev->wideTnodesDisabled = 1;
++#endif
++
++	dev->skipCheckpointRead = options.skip_checkpoint_read;
++	dev->skipCheckpointWrite = options.skip_checkpoint_write;
++	
++	/* we assume this is protected by lock_kernel() in mount/umount */
++	list_add_tail(&dev->devList, &yaffs_dev_list);
++
++	init_MUTEX(&dev->grossLock);
++
++	yaffs_GrossLock(dev);
++
++	err = yaffs_GutsInitialise(dev);
++
++	T(YAFFS_TRACE_OS,
++	  ("yaffs_read_super: guts initialised %s\n",
++	   (err == YAFFS_OK) ? "OK" : "FAILED"));
++	
++	/* Release lock before yaffs_get_inode() */
++	yaffs_GrossUnlock(dev);
++
++	/* Create root inode */
++	if (err == YAFFS_OK)
++		inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0,
++					yaffs_Root(dev));
++
++	if (!inode)
++		return NULL;
++
++	inode->i_op = &yaffs_dir_inode_operations;
++	inode->i_fop = &yaffs_dir_operations;
++
++	T(YAFFS_TRACE_OS, ("yaffs_read_super: got root inode\n"));
++
++	root = d_alloc_root(inode);
++
++	T(YAFFS_TRACE_OS, ("yaffs_read_super: d_alloc_root done\n"));
++
++	if (!root) {
++		iput(inode);
++		return NULL;
++	}
++	sb->s_root = root;
++
++	T(YAFFS_TRACE_OS, ("yaffs_read_super: done\n"));
++	return sb;
++}
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
++					 int silent)
++{
++	return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++static int yaffs_read_super(struct file_system_type *fs,
++			    int flags, const char *dev_name,
++			    void *data, struct vfsmount *mnt)
++{
++
++	return get_sb_bdev(fs, flags, dev_name, data,
++			   yaffs_internal_read_super_mtd, mnt);
++}
++#else
++static struct super_block *yaffs_read_super(struct file_system_type *fs,
++					    int flags, const char *dev_name,
++					    void *data)
++{
++
++	return get_sb_bdev(fs, flags, dev_name, data,
++			   yaffs_internal_read_super_mtd);
++}
++#endif
++
++static struct file_system_type yaffs_fs_type = {
++	.owner = THIS_MODULE,
++	.name = "yaffs",
++	.get_sb = yaffs_read_super,
++	.kill_sb = kill_block_super,
++	.fs_flags = FS_REQUIRES_DEV,
++};
++#else
++static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
++					    int silent)
++{
++	return yaffs_internal_read_super(1, sb, data, silent);
++}
++
++static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
++		      FS_REQUIRES_DEV);
++#endif
++
++
++#ifdef CONFIG_YAFFS_YAFFS2
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
++					  int silent)
++{
++	return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++static int yaffs2_read_super(struct file_system_type *fs,
++			int flags, const char *dev_name, void *data,
++			struct vfsmount *mnt)
++{
++	return get_sb_bdev(fs, flags, dev_name, data,
++			yaffs2_internal_read_super_mtd, mnt);
++}
++#else
++static struct super_block *yaffs2_read_super(struct file_system_type *fs,
++					     int flags, const char *dev_name,
++					     void *data)
++{
++
++	return get_sb_bdev(fs, flags, dev_name, data,
++			   yaffs2_internal_read_super_mtd);
++}
++#endif
++
++static struct file_system_type yaffs2_fs_type = {
++	.owner = THIS_MODULE,
++	.name = "yaffs2",
++	.get_sb = yaffs2_read_super,
++	.kill_sb = kill_block_super,
++	.fs_flags = FS_REQUIRES_DEV,
++};
++#else
++static struct super_block *yaffs2_read_super(struct super_block *sb,
++					     void *data, int silent)
++{
++	return yaffs_internal_read_super(2, sb, data, silent);
++}
++
++static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
++		      FS_REQUIRES_DEV);
++#endif
++
++#endif				/* CONFIG_YAFFS_YAFFS2 */
++
++static struct proc_dir_entry *my_proc_entry;
++
++static char *yaffs_dump_dev(char *buf, yaffs_Device * dev)
++{
++	buf += sprintf(buf, "startBlock......... %d\n", dev->startBlock);
++	buf += sprintf(buf, "endBlock........... %d\n", dev->endBlock);
++	buf += sprintf(buf, "nDataBytesPerChunk. %d\n", dev->nDataBytesPerChunk);
++	buf += sprintf(buf, "chunkGroupBits..... %d\n", dev->chunkGroupBits);
++	buf += sprintf(buf, "chunkGroupSize..... %d\n", dev->chunkGroupSize);
++	buf += sprintf(buf, "nErasedBlocks...... %d\n", dev->nErasedBlocks);
++	buf += sprintf(buf, "nReservedBlocks.... %d\n", dev->nReservedBlocks);
++	buf += sprintf(buf, "nCheckptResBlocks.. %d\n", dev->nCheckpointReservedBlocks);
++	buf += sprintf(buf, "blocksInCheckpoint. %d\n", dev->blocksInCheckpoint);
++	buf += sprintf(buf, "nTnodesCreated..... %d\n", dev->nTnodesCreated);
++	buf += sprintf(buf, "nFreeTnodes........ %d\n", dev->nFreeTnodes);
++	buf += sprintf(buf, "nObjectsCreated.... %d\n", dev->nObjectsCreated);
++	buf += sprintf(buf, "nFreeObjects....... %d\n", dev->nFreeObjects);
++	buf += sprintf(buf, "nFreeChunks........ %d\n", dev->nFreeChunks);
++	buf += sprintf(buf, "nPageWrites........ %d\n", dev->nPageWrites);
++	buf += sprintf(buf, "nPageReads......... %d\n", dev->nPageReads);
++	buf += sprintf(buf, "nBlockErasures..... %d\n", dev->nBlockErasures);
++	buf += sprintf(buf, "nGCCopies.......... %d\n", dev->nGCCopies);
++	buf +=
++	    sprintf(buf, "garbageCollections. %d\n", dev->garbageCollections);
++	buf +=
++	    sprintf(buf, "passiveGCs......... %d\n",
++		    dev->passiveGarbageCollections);
++	buf += sprintf(buf, "nRetriedWrites..... %d\n", dev->nRetriedWrites);
++	buf += sprintf(buf, "nShortOpCaches..... %d\n", dev->nShortOpCaches);
++	buf += sprintf(buf, "nRetireBlocks...... %d\n", dev->nRetiredBlocks);
++	buf += sprintf(buf, "eccFixed........... %d\n", dev->eccFixed);
++	buf += sprintf(buf, "eccUnfixed......... %d\n", dev->eccUnfixed);
++	buf += sprintf(buf, "tagsEccFixed....... %d\n", dev->tagsEccFixed);
++	buf += sprintf(buf, "tagsEccUnfixed..... %d\n", dev->tagsEccUnfixed);
++	buf += sprintf(buf, "cacheHits.......... %d\n", dev->cacheHits);
++	buf += sprintf(buf, "nDeletedFiles...... %d\n", dev->nDeletedFiles);
++	buf += sprintf(buf, "nUnlinkedFiles..... %d\n", dev->nUnlinkedFiles);
++	buf +=
++	    sprintf(buf, "nBackgroundDeletions %d\n", dev->nBackgroundDeletions);
++	buf += sprintf(buf, "useNANDECC......... %d\n", dev->useNANDECC);
++	buf += sprintf(buf, "isYaffs2........... %d\n", dev->isYaffs2);
++
++	return buf;
++}
++
++static int yaffs_proc_read(char *page,
++			   char **start,
++			   off_t offset, int count, int *eof, void *data)
++{
++	struct list_head *item;
++	char *buf = page;
++	int step = offset;
++	int n = 0;
++
++	/* Get proc_file_read() to step 'offset' by one on each successive call.
++	 * We use 'offset' (*ppos) to indicate where we are in devList.
++	 * This also assumes the user has posted a read buffer large
++	 * enough to hold the complete output; but that's life in /proc.
++	 */
++
++	*(int *)start = 1;
++
++	/* Print header first */
++	if (step == 0) {
++		buf += sprintf(buf, "YAFFS built:" __DATE__ " " __TIME__
++			       "\n%s\n%s\n", yaffs_fs_c_version,
++			       yaffs_guts_c_version);
++	}
++
++	/* hold lock_kernel while traversing yaffs_dev_list */
++	lock_kernel();
++
++	/* Locate and print the Nth entry.  Order N-squared but N is small. */
++	list_for_each(item, &yaffs_dev_list) {
++		yaffs_Device *dev = list_entry(item, yaffs_Device, devList);
++		if (n < step) {
++			n++;
++			continue;
++		}
++		buf += sprintf(buf, "\nDevice %d \"%s\"\n", n, dev->name);
++		buf = yaffs_dump_dev(buf, dev);
++		break;
++	}
++	unlock_kernel();
++
++	return buf - page < count ? buf - page : count;
++}
++
++/**
++ * Set the verbosity of the warnings and error messages.
++ *
++ * Note that the names can only be a..z or _ with the current code.
++ */
++
++static struct {
++	char *mask_name;
++	unsigned mask_bitfield;
++} mask_flags[] = {
++	{"allocate", YAFFS_TRACE_ALLOCATE},
++	{"always", YAFFS_TRACE_ALWAYS},
++	{"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
++	{"buffers", YAFFS_TRACE_BUFFERS},
++	{"bug", YAFFS_TRACE_BUG},
++	{"checkpt", YAFFS_TRACE_CHECKPOINT},
++	{"deletion", YAFFS_TRACE_DELETION},
++	{"erase", YAFFS_TRACE_ERASE},
++	{"error", YAFFS_TRACE_ERROR},
++	{"gc_detail", YAFFS_TRACE_GC_DETAIL},
++	{"gc", YAFFS_TRACE_GC},
++	{"mtd", YAFFS_TRACE_MTD},
++	{"nandaccess", YAFFS_TRACE_NANDACCESS},
++	{"os", YAFFS_TRACE_OS},
++	{"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
++	{"scan", YAFFS_TRACE_SCAN},
++	{"tracing", YAFFS_TRACE_TRACING},
++
++	{"verify", YAFFS_TRACE_VERIFY},
++	{"verify_nand", YAFFS_TRACE_VERIFY_NAND},
++	{"verify_full", YAFFS_TRACE_VERIFY_FULL},
++	{"verify_all", YAFFS_TRACE_VERIFY_ALL},
++
++	{"write", YAFFS_TRACE_WRITE},
++	{"all", 0xffffffff},
++	{"none", 0},
++	{NULL, 0},
++};
++
++#define MAX_MASK_NAME_LENGTH 40
++static int yaffs_proc_write(struct file *file, const char *buf,
++					 unsigned long count, void *data)
++{
++	unsigned rg = 0, mask_bitfield;
++	char *end;
++	char *mask_name;
++	char *x; 
++	char substring[MAX_MASK_NAME_LENGTH+1];
++	int i;
++	int done = 0;
++	int add, len = 0;
++	int pos = 0;
++
++	rg = yaffs_traceMask;
++
++	while (!done && (pos < count)) {
++		done = 1;
++		while ((pos < count) && isspace(buf[pos])) {
++			pos++;
++		}
++
++		switch (buf[pos]) {
++		case '+':
++		case '-':
++		case '=':
++			add = buf[pos];
++			pos++;
++			break;
++
++		default:
++			add = ' ';
++			break;
++		}
++		mask_name = NULL;
++		
++		mask_bitfield = simple_strtoul(buf + pos, &end, 0);
++		if (end > buf + pos) {
++			mask_name = "numeral";
++			len = end - (buf + pos);
++			done = 0;
++		} else {
++			for(x = buf + pos, i = 0; 
++			    (*x == '_' || (*x >='a' && *x <= 'z')) &&
++			    i <MAX_MASK_NAME_LENGTH; x++, i++, pos++)
++			    substring[i] = *x;
++			substring[i] = '\0';
++			
++			for (i = 0; mask_flags[i].mask_name != NULL; i++) {
++				//len = strlen(mask_flags[i].mask_name);
++				//if (strncmp(buf + pos, mask_flags[i].mask_name, len) == 0) {
++				if(strcmp(substring,mask_flags[i].mask_name) == 0){
++					mask_name = mask_flags[i].mask_name;
++					mask_bitfield = mask_flags[i].mask_bitfield;
++					done = 0;
++					break;
++				}
++			}
++		}
++
++		if (mask_name != NULL) {
++			// pos += len;
++			done = 0;
++			switch(add) {
++			case '-':
++				rg &= ~mask_bitfield;
++				break;
++			case '+':
++				rg |= mask_bitfield;
++				break;
++			case '=':
++				rg = mask_bitfield;
++				break;
++			default:
++				rg |= mask_bitfield;
++				break;
++			}
++		}
++	}
++
++	yaffs_traceMask = rg | YAFFS_TRACE_ALWAYS;
++	
++	printk("new trace = 0x%08X\n",yaffs_traceMask);
++	
++	if (rg & YAFFS_TRACE_ALWAYS) {
++		for (i = 0; mask_flags[i].mask_name != NULL; i++) {
++			char flag;
++			flag = ((rg & mask_flags[i].mask_bitfield) == mask_flags[i].mask_bitfield) ? '+' : '-';
++			printk("%c%s\n", flag, mask_flags[i].mask_name);
++		}
++	}
++
++	return count;
++}
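++
++/*
++ * Illustrative use of the /proc/yaffs interface implemented above:
++ *
++ *     cat /proc/yaffs                     # dump per-device statistics
++ *     echo "+os +mtd" > /proc/yaffs       # add trace groups to the mask
++ *     echo "=0x00000001" > /proc/yaffs    # set the mask to an absolute value
++ *
++ * '+' sets bits, '-' clears bits and '=' replaces the whole mask; either
++ * the names listed in mask_flags[] or numeric values are accepted, and
++ * YAFFS_TRACE_ALWAYS is always forced back on.
++ */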
++
++/* Stuff to handle installation of file systems */
++struct file_system_to_install {
++	struct file_system_type *fst;
++	int installed;
++};
++
++static struct file_system_to_install fs_to_install[] = {
++//#ifdef CONFIG_YAFFS_YAFFS1
++	{&yaffs_fs_type, 0},
++//#endif
++//#ifdef CONFIG_YAFFS_YAFFS2
++	{&yaffs2_fs_type, 0},
++//#endif
++	{NULL, 0}
++};
++
++static int __init init_yaffs_fs(void)
++{
++	int error = 0;
++	struct file_system_to_install *fsinst;
++
++	T(YAFFS_TRACE_ALWAYS,
++	  ("yaffs " __DATE__ " " __TIME__ " Installing. \n"));
++
++	/* Install the proc_fs entry */
++	my_proc_entry = create_proc_entry("yaffs",
++					       S_IRUGO | S_IFREG,
++					       &proc_root);
++
++	if (my_proc_entry) {
++		my_proc_entry->write_proc = yaffs_proc_write;
++		my_proc_entry->read_proc = yaffs_proc_read;
++		my_proc_entry->data = NULL;
++	} else {
++		return -ENOMEM;
++	}
++
++	/* Now add the file system entries */
++
++	fsinst = fs_to_install;
++
++	while (fsinst->fst && !error) {
++		error = register_filesystem(fsinst->fst);
++		if (!error) {
++			fsinst->installed = 1;
++		}
++		fsinst++;
++	}
++
++	/* Any errors? uninstall  */
++	if (error) {
++		fsinst = fs_to_install;
++
++		while (fsinst->fst) {
++			if (fsinst->installed) {
++				unregister_filesystem(fsinst->fst);
++				fsinst->installed = 0;
++			}
++			fsinst++;
++		}
++	}
++
++	return error;
++}
++
++static void __exit exit_yaffs_fs(void)
++{
++
++	struct file_system_to_install *fsinst;
++
++	T(YAFFS_TRACE_ALWAYS, ("yaffs " __DATE__ " " __TIME__
++			       " removing. \n"));
++
++	remove_proc_entry("yaffs", &proc_root);
++
++	fsinst = fs_to_install;
++
++	while (fsinst->fst) {
++		if (fsinst->installed) {
++			unregister_filesystem(fsinst->fst);
++			fsinst->installed = 0;
++		}
++		fsinst++;
++	}
++
++}
++
++module_init(init_yaffs_fs)
++module_exit(exit_yaffs_fs)
++
++MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
++MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2006");
++MODULE_LICENSE("GPL");
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_guts.c linux-2.6.21.1.new/fs/yaffs2/yaffs_guts.c
+--- linux-2.6.21.1/fs/yaffs2/yaffs_guts.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_guts.c	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,7469 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++const char *yaffs_guts_c_version =
++    "$Id: yaffs_guts.c,v 1.49 2007-05-15 20:07:40 charles Exp $";
++
++#include "yportenv.h"
++
++#include "yaffsinterface.h"
++#include "yaffs_guts.h"
++#include "yaffs_tagsvalidity.h"
++
++#include "yaffs_tagscompat.h"
++#ifndef  CONFIG_YAFFS_USE_OWN_SORT
++#include "yaffs_qsort.h"
++#endif
++#include "yaffs_nand.h"
++
++#include "yaffs_checkptrw.h"
++
++#include "yaffs_nand.h"
++#include "yaffs_packedtags2.h"
++
++
++#ifdef CONFIG_YAFFS_WINCE
++void yfsd_LockYAFFS(BOOL fsLockOnly);
++void yfsd_UnlockYAFFS(BOOL fsLockOnly);
++#endif
++
++#define YAFFS_PASSIVE_GC_CHUNKS 2
++
++#include "yaffs_ecc.h"
++
++
++/* Robustification (if it ever comes about...) */
++static void yaffs_RetireBlock(yaffs_Device * dev, int blockInNAND);
++static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND, int erasedOk);
++static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
++				     const __u8 * data,
++				     const yaffs_ExtendedTags * tags);
++static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
++				    const yaffs_ExtendedTags * tags);
++
++/* Other local prototypes */
++static int yaffs_UnlinkObject( yaffs_Object *obj);
++static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj);
++
++static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList);
++
++static int yaffs_WriteNewChunkWithTagsToNAND(yaffs_Device * dev,
++					     const __u8 * buffer,
++					     yaffs_ExtendedTags * tags,
++					     int useReserve);
++static int yaffs_PutChunkIntoFile(yaffs_Object * in, int chunkInInode,
++				  int chunkInNAND, int inScan);
++
++static yaffs_Object *yaffs_CreateNewObject(yaffs_Device * dev, int number,
++					   yaffs_ObjectType type);
++static void yaffs_AddObjectToDirectory(yaffs_Object * directory,
++				       yaffs_Object * obj);
++static int yaffs_UpdateObjectHeader(yaffs_Object * in, const YCHAR * name,
++				    int force, int isShrink, int shadows);
++static void yaffs_RemoveObjectFromDirectory(yaffs_Object * obj);
++static int yaffs_CheckStructures(void);
++static int yaffs_DeleteWorker(yaffs_Object * in, yaffs_Tnode * tn, __u32 level,
++			      int chunkOffset, int *limit);
++static int yaffs_DoGenericObjectDeletion(yaffs_Object * in);
++
++static yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blockNo);
++
++static __u8 *yaffs_GetTempBuffer(yaffs_Device * dev, int lineNo);
++static void yaffs_ReleaseTempBuffer(yaffs_Device * dev, __u8 * buffer,
++				    int lineNo);
++
++static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
++				  int chunkInNAND);
++
++static int yaffs_UnlinkWorker(yaffs_Object * obj);
++static void yaffs_DestroyObject(yaffs_Object * obj);
++
++static int yaffs_TagsMatch(const yaffs_ExtendedTags * tags, int objectId,
++			   int chunkInObject);
++
++loff_t yaffs_GetFileSize(yaffs_Object * obj);
++
++static int yaffs_AllocateChunk(yaffs_Device * dev, int useReserve, yaffs_BlockInfo **blockUsedPtr);
++
++static void yaffs_VerifyFreeChunks(yaffs_Device * dev);
++
++static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in);
++
++#ifdef YAFFS_PARANOID
++static int yaffs_CheckFileSanity(yaffs_Object * in);
++#else
++#define yaffs_CheckFileSanity(in)
++#endif
++
++static void yaffs_InvalidateWholeChunkCache(yaffs_Object * in);
++static void yaffs_InvalidateChunkCache(yaffs_Object * object, int chunkId);
++
++static void yaffs_InvalidateCheckpoint(yaffs_Device *dev);
++
++static int yaffs_FindChunkInFile(yaffs_Object * in, int chunkInInode,
++				 yaffs_ExtendedTags * tags);
++
++static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos);
++static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device * dev,
++					  yaffs_FileStructure * fStruct,
++					  __u32 chunkId);
++
++
++/* Function to calculate chunk and offset */
++
++static void yaffs_AddrToChunk(yaffs_Device *dev, loff_t addr, __u32 *chunk, __u32 *offset)
++{
++	if(dev->chunkShift){
++		/* Easy-peasy power of 2 case */
++		*chunk  = (__u32)(addr >> dev->chunkShift);
++		*offset = (__u32)(addr & dev->chunkMask);
++	}
++	else if(dev->crumbsPerChunk)
++	{
++		/* Case where we're using "crumbs" */
++		*offset = (__u32)(addr & dev->crumbMask);
++		addr >>= dev->crumbShift;
++		*chunk = ((__u32)addr)/dev->crumbsPerChunk;
++		*offset += ((addr - (*chunk * dev->crumbsPerChunk)) << dev->crumbShift);
++	}
++	else
++		YBUG();
++}
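++
++/*
++ * Illustrative example for the power-of-two case above (2048 byte chunks,
++ * i.e. chunkShift == 11): addr 0x1234 maps to chunk 2 with offset 0x234,
++ * since 0x1234 >> 11 == 2 and 0x1234 & 0x7FF == 0x234.
++ */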
++
++/* Function to return the number of shifts for a power of 2 greater than or equal 
++ * to the given number.
++ * Note we don't try to cater for all possible numbers and this does not have to
++ * be hellishly efficient.
++ */
++ 
++static __u32 ShiftsGE(__u32 x)
++{
++	int extraBits;
++	int nShifts;
++	
++	nShifts = extraBits = 0;
++	
++	while(x>1){
++		if(x & 1) extraBits++;
++		x>>=1;
++		nShifts++;
++	}
++
++	if(extraBits) 
++		nShifts++;
++		
++	return nShifts;
++}
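++
++/* For example, ShiftsGE(2048) == 11 and ShiftsGE(1536) == 11 (rounded up). */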
++
++/* Function to return the number of shifts to get a 1 in bit 0
++ */
++ 
++static __u32 ShiftDiv(__u32 x)
++{
++	int nShifts;
++	
++	nShifts =  0;
++	
++	if(!x) return 0;
++	
++	while( !(x&1)){
++		x>>=1;
++		nShifts++;
++	}
++		
++	return nShifts;
++}
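++
++/* For example, ShiftDiv(2048) == 11 and ShiftDiv(512) == 9. */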
++
++
++
++/* 
++ * Temporary buffer manipulations.
++ */
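++/*
++ * A small pool of YAFFS_N_TEMP_BUFFERS chunk-sized buffers is kept per
++ * device; yaffs_GetTempBuffer() hands one out (recording the caller's line
++ * number for debugging) and falls back to an unmanaged YMALLOC() only when
++ * the pool is exhausted.
++ */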
++
++static int yaffs_InitialiseTempBuffers(yaffs_Device *dev)	
++{
++	int i;
++	__u8 *buf = (__u8 *)1;
++		
++	memset(dev->tempBuffer,0,sizeof(dev->tempBuffer));
++		
++	for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
++		dev->tempBuffer[i].line = 0;	/* not in use */
++		dev->tempBuffer[i].buffer = buf =
++		    YMALLOC_DMA(dev->nDataBytesPerChunk);
++	}
++		
++	return buf ? YAFFS_OK : YAFFS_FAIL;
++	
++}
++
++static __u8 *yaffs_GetTempBuffer(yaffs_Device * dev, int lineNo)
++{
++	int i, j;
++	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++		if (dev->tempBuffer[i].line == 0) {
++			dev->tempBuffer[i].line = lineNo;
++			if ((i + 1) > dev->maxTemp) {
++				dev->maxTemp = i + 1;
++				for (j = 0; j <= i; j++)
++					dev->tempBuffer[j].maxLine =
++					    dev->tempBuffer[j].line;
++			}
++
++			return dev->tempBuffer[i].buffer;
++		}
++	}
++
++	T(YAFFS_TRACE_BUFFERS,
++	  (TSTR("Out of temp buffers at line %d, other held by lines:"),
++	   lineNo));
++	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++		T(YAFFS_TRACE_BUFFERS, (TSTR(" %d "), dev->tempBuffer[i].line));
++	}
++	T(YAFFS_TRACE_BUFFERS, (TSTR(" " TENDSTR)));
++
++	/*
++	 * If we got here then we have to allocate an unmanaged one.
++	 * This is not good.
++	 */
++
++	dev->unmanagedTempAllocations++;
++	return YMALLOC(dev->nDataBytesPerChunk);
++
++}
++
++static void yaffs_ReleaseTempBuffer(yaffs_Device * dev, __u8 * buffer,
++				    int lineNo)
++{
++	int i;
++	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++		if (dev->tempBuffer[i].buffer == buffer) {
++			dev->tempBuffer[i].line = 0;
++			return;
++		}
++	}
++
++	if (buffer) {
++		/* assume it is an unmanaged one. */
++		T(YAFFS_TRACE_BUFFERS,
++		  (TSTR("Releasing unmanaged temp buffer in line %d" TENDSTR),
++		   lineNo));
++		YFREE(buffer);
++		dev->unmanagedTempDeallocations++;
++	}
++
++}
++
++/*
++ * Determine if we have a managed buffer.
++ */
++int yaffs_IsManagedTempBuffer(yaffs_Device * dev, const __u8 * buffer)
++{
++	int i;
++	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++		if (dev->tempBuffer[i].buffer == buffer)
++			return 1;
++
++	}
++
++    for (i = 0; i < dev->nShortOpCaches; i++) {
++        if( dev->srCache[i].data == buffer )
++            return 1;
++
++    }
++
++    if (buffer == dev->checkpointBuffer)
++      return 1;
++
++    T(YAFFS_TRACE_ALWAYS,
++	  (TSTR("yaffs: unmanaged buffer detected.\n" TENDSTR)));
++    return 0;
++}
++
++
++
++/*
++ * Chunk bitmap manipulations
++ */
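++/*
++ * One bit is kept per chunk of each managed block; a set bit means the
++ * chunk currently holds data that is in use (the verification code further
++ * down compares the counted bits against bi->pagesInUse).  chunkBitmapStride
++ * is the number of bytes holding one block's worth of bits.
++ */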
++
++static Y_INLINE __u8 *yaffs_BlockBits(yaffs_Device * dev, int blk)
++{
++	if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
++		T(YAFFS_TRACE_ERROR,
++		  (TSTR("**>> yaffs: BlockBits block %d is not valid" TENDSTR),
++		   blk));
++		YBUG();
++	}
++	return dev->chunkBits +
++	    (dev->chunkBitmapStride * (blk - dev->internalStartBlock));
++}
++
++static Y_INLINE void yaffs_VerifyChunkBitId(yaffs_Device *dev, int blk, int chunk)
++{
++	if(blk < dev->internalStartBlock || blk > dev->internalEndBlock ||
++	   chunk < 0 || chunk >= dev->nChunksPerBlock) {
++	   T(YAFFS_TRACE_ERROR,
++	    (TSTR("**>> yaffs: Chunk Id (%d:%d) invalid"TENDSTR),blk,chunk));
++	    YBUG();
++	}
++}
++
++static Y_INLINE void yaffs_ClearChunkBits(yaffs_Device * dev, int blk)
++{
++	__u8 *blkBits = yaffs_BlockBits(dev, blk);
++
++	memset(blkBits, 0, dev->chunkBitmapStride);
++}
++
++static Y_INLINE void yaffs_ClearChunkBit(yaffs_Device * dev, int blk, int chunk)
++{
++	__u8 *blkBits = yaffs_BlockBits(dev, blk);
++
++	yaffs_VerifyChunkBitId(dev,blk,chunk);
++
++	blkBits[chunk / 8] &= ~(1 << (chunk & 7));
++}
++
++static Y_INLINE void yaffs_SetChunkBit(yaffs_Device * dev, int blk, int chunk)
++{
++	__u8 *blkBits = yaffs_BlockBits(dev, blk);
++	
++	yaffs_VerifyChunkBitId(dev,blk,chunk);
++
++	blkBits[chunk / 8] |= (1 << (chunk & 7));
++}
++
++static Y_INLINE int yaffs_CheckChunkBit(yaffs_Device * dev, int blk, int chunk)
++{
++	__u8 *blkBits = yaffs_BlockBits(dev, blk);
++	yaffs_VerifyChunkBitId(dev,blk,chunk);
++
++	return (blkBits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
++}
++
++static Y_INLINE int yaffs_StillSomeChunkBits(yaffs_Device * dev, int blk)
++{
++	__u8 *blkBits = yaffs_BlockBits(dev, blk);
++	int i;
++	for (i = 0; i < dev->chunkBitmapStride; i++) {
++		if (*blkBits)
++			return 1;
++		blkBits++;
++	}
++	return 0;
++}
++
++static int yaffs_CountChunkBits(yaffs_Device * dev, int blk)
++{
++	__u8 *blkBits = yaffs_BlockBits(dev, blk);
++	int i;
++	int n = 0;
++	for (i = 0; i < dev->chunkBitmapStride; i++) {
++		__u8 x = *blkBits;
++		while(x){
++			if(x & 1)
++				n++;
++			x >>=1;
++		}
++			
++		blkBits++;
++	}
++	return n;
++}
++
++/* 
++ * Verification code
++ */
++ 
++static int yaffs_SkipVerification(yaffs_Device *dev)
++{
++	return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
++}
++
++static int yaffs_SkipFullVerification(yaffs_Device *dev)
++{
++	return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_FULL));
++}
++
++static int yaffs_SkipNANDVerification(yaffs_Device *dev)
++{
++	return !(yaffs_traceMask & (YAFFS_TRACE_VERIFY_NAND));
++}
++
++static const char * blockStateName[] = {
++"Unknown",
++"Needs scanning",
++"Scanning",
++"Empty",
++"Allocating",
++"Full",
++"Dirty",
++"Checkpoint",
++"Collecting",
++"Dead"
++};
++
++static void yaffs_VerifyBlock(yaffs_Device *dev,yaffs_BlockInfo *bi,int n)
++{
++	int actuallyUsed;
++	int inUse;
++	
++	if(yaffs_SkipVerification(dev))
++		return;
++		
++	/* Report illegal runtime states */
++	if(bi->blockState <0 || bi->blockState >= YAFFS_NUMBER_OF_BLOCK_STATES)
++		T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has undefined state %d"TENDSTR),n,bi->blockState));
++		
++	switch(bi->blockState){
++	 case YAFFS_BLOCK_STATE_UNKNOWN:
++	 case YAFFS_BLOCK_STATE_SCANNING:
++	 case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
++		T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has bad run-state %s"TENDSTR),
++		n,blockStateName[bi->blockState]));
++	}
++	
++	/* Check pages in use and soft deletions are legal */
++	
++	actuallyUsed = bi->pagesInUse - bi->softDeletions;
++	
++	if(bi->pagesInUse < 0 || bi->pagesInUse > dev->nChunksPerBlock ||
++	   bi->softDeletions < 0 || bi->softDeletions > dev->nChunksPerBlock ||
++	   actuallyUsed < 0 || actuallyUsed > dev->nChunksPerBlock)
++		T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has illegal values pagesInUse %d softDeletions %d"TENDSTR),
++		n,bi->pagesInUse,bi->softDeletions));
++	
++		
++	/* Check chunk bitmap legal */
++	inUse = yaffs_CountChunkBits(dev,n);
++	if(inUse != bi->pagesInUse)
++		T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has inconsistent values pagesInUse %d counted chunk bits %d"TENDSTR),
++			n,bi->pagesInUse,inUse));
++	
++	/* Check that the sequence number is valid.
++	 * Ten million is legal, but is very unlikely 
++	 */
++	if(dev->isYaffs2 && 
++	   (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING || bi->blockState == YAFFS_BLOCK_STATE_FULL) &&
++	   (bi->sequenceNumber < YAFFS_LOWEST_SEQUENCE_NUMBER || bi->sequenceNumber > 10000000 ))
++		T(YAFFS_TRACE_VERIFY,(TSTR("Block %d has suspect sequence number of %d"TENDSTR),
++		n,bi->sequenceNumber));
++		
++}
++
++static void yaffs_VerifyCollectedBlock(yaffs_Device *dev,yaffs_BlockInfo *bi,int n)
++{
++	yaffs_VerifyBlock(dev,bi,n);
++	
++	/* After collection the block should be in the erased state */
++	/* TODO: This will need to change if we do partial gc */
++	
++	if(bi->blockState != YAFFS_BLOCK_STATE_EMPTY){
++		T(YAFFS_TRACE_ERROR,(TSTR("Block %d is in state %d after gc, should be erased"TENDSTR),
++			n,bi->blockState));
++	}
++}
++
++static void yaffs_VerifyBlocks(yaffs_Device *dev)
++{
++	int i;
++	int nBlocksPerState[YAFFS_NUMBER_OF_BLOCK_STATES];
++	int nIllegalBlockStates = 0;
++	
++
++	if(yaffs_SkipVerification(dev))
++		return;
++
++	memset(nBlocksPerState,0,sizeof(nBlocksPerState));
++
++		
++	for(i = dev->internalStartBlock; i <= dev->internalEndBlock; i++){
++		yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,i);
++		yaffs_VerifyBlock(dev,bi,i);
++
++		if(bi->blockState >=0 && bi->blockState < YAFFS_NUMBER_OF_BLOCK_STATES)
++			nBlocksPerState[bi->blockState]++;
++		else
++			nIllegalBlockStates++;
++					
++	}
++	
++	T(YAFFS_TRACE_VERIFY,(TSTR(""TENDSTR)));
++	T(YAFFS_TRACE_VERIFY,(TSTR("Block summary"TENDSTR)));
++	
++	T(YAFFS_TRACE_VERIFY,(TSTR("%d blocks have illegal states"TENDSTR),nIllegalBlockStates));
++	if(nBlocksPerState[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
++		T(YAFFS_TRACE_VERIFY,(TSTR("Too many allocating blocks"TENDSTR)));
++
++	for(i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
++		T(YAFFS_TRACE_VERIFY,
++		  (TSTR("%s %d blocks"TENDSTR),
++		  blockStateName[i],nBlocksPerState[i]));
++	
++	if(dev->blocksInCheckpoint != nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT])
++		T(YAFFS_TRACE_VERIFY,
++		 (TSTR("Checkpoint block count wrong dev %d count %d"TENDSTR),
++		 dev->blocksInCheckpoint, nBlocksPerState[YAFFS_BLOCK_STATE_CHECKPOINT]));
++		 
++	if(dev->nErasedBlocks != nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY])
++		T(YAFFS_TRACE_VERIFY,
++		 (TSTR("Erased block count wrong dev %d count %d"TENDSTR),
++		 dev->nErasedBlocks, nBlocksPerState[YAFFS_BLOCK_STATE_EMPTY]));
++		 
++	if(nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING] > 1)
++		T(YAFFS_TRACE_VERIFY,
++		 (TSTR("Too many collecting blocks %d (max is 1)"TENDSTR),
++		 nBlocksPerState[YAFFS_BLOCK_STATE_COLLECTING]));
++
++	T(YAFFS_TRACE_VERIFY,(TSTR(""TENDSTR)));
++
++}
++
++/*
++ * Verify the object header. oh must be valid, but obj and tags may be NULL in which
++ * case those tests will not be performed.
++ */
++static void yaffs_VerifyObjectHeader(yaffs_Object *obj, yaffs_ObjectHeader *oh, yaffs_ExtendedTags *tags, int parentCheck)
++{
++	if(yaffs_SkipVerification(obj->myDev))
++		return;
++		
++	if(!(tags && obj && oh)){
++	 	T(YAFFS_TRACE_VERIFY,
++		 		(TSTR("Verifying object header tags %x obj %x oh %x"TENDSTR),
++		 		(__u32)tags,(__u32)obj,(__u32)oh));
++		return;
++	}
++	
++	if(oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
++	   oh->type > YAFFS_OBJECT_TYPE_MAX)
++	 	T(YAFFS_TRACE_VERIFY,
++		 (TSTR("Obj %d header type is illegal value 0x%x"TENDSTR),
++		 tags->objectId, oh->type));
++
++	if(tags->objectId != obj->objectId)
++	 	T(YAFFS_TRACE_VERIFY,
++		 (TSTR("Obj %d header mismatch objectId %d"TENDSTR),
++		 tags->objectId, obj->objectId));
++
++
++	/*
++	 * Check that the object's parent ids match if parentCheck requested.
++	 * 
++	 * Tests do not apply to the root object.
++	 */
++	
++	if(parentCheck && tags->objectId > 1 && !obj->parent)
++	 	T(YAFFS_TRACE_VERIFY,
++		 (TSTR("Obj %d header mismatch parentId %d obj->parent is NULL"TENDSTR),
++	 	 tags->objectId, oh->parentObjectId));
++		
++	
++	if(parentCheck && obj->parent &&
++	   oh->parentObjectId != obj->parent->objectId && 
++	   (oh->parentObjectId != YAFFS_OBJECTID_UNLINKED ||
++	    obj->parent->objectId != YAFFS_OBJECTID_DELETED))
++	 	T(YAFFS_TRACE_VERIFY,
++		 (TSTR("Obj %d header mismatch parentId %d parentObjectId %d"TENDSTR),
++	 	 tags->objectId, oh->parentObjectId, obj->parent->objectId));
++		
++	
++	if(tags->objectId > 1 && oh->name[0] == 0) /* Null name */
++		T(YAFFS_TRACE_VERIFY,
++		(TSTR("Obj %d header name is NULL"TENDSTR),
++		 obj->objectId));
++
++	if(tags->objectId > 1 && ((__u8)(oh->name[0])) == 0xff) /* Trashed name */
++		T(YAFFS_TRACE_VERIFY,
++		(TSTR("Obj %d header name is 0xFF"TENDSTR),
++		 obj->objectId));
++}
++
++
++
++static int yaffs_VerifyTnodeWorker(yaffs_Object * obj, yaffs_Tnode * tn,
++				  	__u32 level, int chunkOffset)
++{
++	int i;
++	yaffs_Device *dev = obj->myDev;
++	int ok = 1;
++	int nTnodeBytes = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++
++	if (tn) {
++		if (level > 0) {
++
++			for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++){
++				if (tn->internal[i]) {
++					ok = yaffs_VerifyTnodeWorker(obj,
++							tn->internal[i],
++							level - 1,
++							(chunkOffset<<YAFFS_TNODES_INTERNAL_BITS) + i);
++				}
++			}
++		} else if (level == 0) {
++			int i;
++			yaffs_ExtendedTags tags;
++			__u32 objectId = obj->objectId;
++			
++			chunkOffset <<=  YAFFS_TNODES_LEVEL0_BITS;
++			
++			for(i = 0; i < YAFFS_NTNODES_LEVEL0; i++){
++				__u32 theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
++				
++				if(theChunk > 0){
++					/* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),tags.objectId,tags.chunkId,theChunk)); */
++					yaffs_ReadChunkWithTagsFromNAND(dev,theChunk,NULL, &tags);
++					if(tags.objectId != objectId || tags.chunkId != chunkOffset){
++						T(~0,(TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
++							objectId, chunkOffset, theChunk,
++							tags.objectId, tags.chunkId));
++					}
++				}
++				chunkOffset++;
++			}
++		}
++	}
++
++	return ok;
++
++}
++
++
++static void yaffs_VerifyFile(yaffs_Object *obj)
++{
++	int requiredTallness;
++	int actualTallness;
++	__u32 lastChunk;
++	__u32 x;
++	__u32 i;
++	int ok;
++	yaffs_Device *dev;
++	yaffs_ExtendedTags tags;
++	yaffs_Tnode *tn;
++	__u32 objectId;
++	
++	if(obj && yaffs_SkipVerification(obj->myDev))
++		return;
++	
++	dev = obj->myDev;
++	objectId = obj->objectId;
++	
++	/* Check file size is consistent with tnode depth */
++	lastChunk =  obj->variant.fileVariant.fileSize / dev->nDataBytesPerChunk + 1;
++	x = lastChunk >> YAFFS_TNODES_LEVEL0_BITS;
++	requiredTallness = 0;
++	while (x> 0) {
++		x >>= YAFFS_TNODES_INTERNAL_BITS;
++		requiredTallness++;
++	}
++	
++	actualTallness = obj->variant.fileVariant.topLevel;
++	
++	if(requiredTallness > actualTallness )
++		T(YAFFS_TRACE_VERIFY,
++		(TSTR("Obj %d had tnode tallness %d, needs to be %d"TENDSTR),
++		 obj->objectId,actualTallness, requiredTallness));
++	
++	
++	/* Check that the chunks in the tnode tree are all correct. 
++	 * We do this by scanning through the tnode tree and
++	 * checking the tags for every chunk match.
++	 */
++
++	if(yaffs_SkipNANDVerification(dev))
++		return;
++		
++	for(i = 1; i <= lastChunk; i++){
++		tn = yaffs_FindLevel0Tnode(dev, &obj->variant.fileVariant,i);
++
++		if (tn) {
++			__u32 theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
++			if(theChunk > 0){
++				/* T(~0,(TSTR("verifying (%d:%d) %d"TENDSTR),objectId,i,theChunk)); */
++				yaffs_ReadChunkWithTagsFromNAND(dev,theChunk,NULL, &tags);
++				if(tags.objectId != objectId || tags.chunkId != i){
++					T(~0,(TSTR("Object %d chunkId %d NAND mismatch chunk %d tags (%d:%d)"TENDSTR),
++						objectId, i, theChunk,
++						tags.objectId, tags.chunkId));
++				}
++			}
++		}
++
++	}
++
++}
++
++static void yaffs_VerifyDirectory(yaffs_Object *obj)
++{
++	if(obj && yaffs_SkipVerification(obj->myDev))
++		return;
++	
++}
++
++static void yaffs_VerifyHardLink(yaffs_Object *obj)
++{
++	if(obj && yaffs_SkipVerification(obj->myDev))
++		return;
++		
++	/* Verify sane equivalent object */
++}
++
++static void yaffs_VerifySymlink(yaffs_Object *obj)
++{
++	if(obj && yaffs_SkipVerification(obj->myDev))
++		return;
++		
++	/* Verify symlink string */
++}
++
++static void yaffs_VerifySpecial(yaffs_Object *obj)
++{
++	if(obj && yaffs_SkipVerification(obj->myDev))
++		return;
++}
++
++static void yaffs_VerifyObject(yaffs_Object *obj)
++{
++	yaffs_Device *dev;
++	
++	__u32 chunkMin;
++	__u32 chunkMax;
++	
++	__u32 chunkIdOk;
++	__u32 chunkIsLive;
++	
++	if(!obj)
++		return;
++	
++	dev = obj->myDev;
++	
++	if(yaffs_SkipVerification(dev))
++		return;
++		
++	/* Check sane object header chunk */
++	
++	chunkMin = dev->internalStartBlock * dev->nChunksPerBlock;
++	chunkMax = (dev->internalEndBlock+1) * dev->nChunksPerBlock - 1;
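++	/* Worked example (purely illustrative numbers, not taken from any real
++	 * board): with internalStartBlock == 1, internalEndBlock == 1024 and
++	 * nChunksPerBlock == 64, a sane object header chunkId must lie in
++	 * [1*64, (1024+1)*64 - 1] == [64, 65599].
++	 */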
++	
++	chunkIdOk = (obj->chunkId >= chunkMin && obj->chunkId <= chunkMax);
++	chunkIsLive = chunkIdOk && 
++			yaffs_CheckChunkBit(dev, 
++					    obj->chunkId / dev->nChunksPerBlock,
++					    obj->chunkId % dev->nChunksPerBlock);
++	if(!obj->fake && 
++	    (!chunkIdOk || !chunkIsLive)) {
++	   T(YAFFS_TRACE_VERIFY,
++	   (TSTR("Obj %d has chunkId %d %s %s"TENDSTR),
++	   obj->objectId,obj->chunkId,
++	   chunkIdOk ? "" : ",out of range",
++	   chunkIsLive || !chunkIdOk ? "" : ",marked as deleted"));
++	}
++	
++	if(chunkIdOk && chunkIsLive &&!yaffs_SkipNANDVerification(dev)) {
++		yaffs_ExtendedTags tags;
++		yaffs_ObjectHeader *oh;
++		__u8 *buffer = yaffs_GetTempBuffer(dev,__LINE__);
++		
++		oh = (yaffs_ObjectHeader *)buffer;
++		
++		yaffs_ReadChunkWithTagsFromNAND(dev, obj->chunkId,buffer, &tags);
++		
++		yaffs_VerifyObjectHeader(obj,oh,&tags,1);
++		
++		yaffs_ReleaseTempBuffer(dev,buffer,__LINE__);
++	}
++	
++	/* Verify it has a parent */
++	if(obj && !obj->fake &&
++	   (!obj->parent || obj->parent->myDev != dev)){
++	   T(YAFFS_TRACE_VERIFY,
++	   (TSTR("Obj %d has parent pointer %p which does not look like an object"TENDSTR),
++	   obj->objectId,obj->parent));	   
++	}
++	
++	/* Verify parent is a directory */
++	if(obj->parent && obj->parent->variantType != YAFFS_OBJECT_TYPE_DIRECTORY){
++	   T(YAFFS_TRACE_VERIFY,
++	   (TSTR("Obj %d's parent is not a directory (type %d)"TENDSTR),
++	   obj->objectId,obj->parent->variantType));	   
++	}
++	
++	switch(obj->variantType){
++	case YAFFS_OBJECT_TYPE_FILE:
++		yaffs_VerifyFile(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++		yaffs_VerifySymlink(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_DIRECTORY:
++		yaffs_VerifyDirectory(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_HARDLINK:
++		yaffs_VerifyHardLink(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_SPECIAL:
++		yaffs_VerifySpecial(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_UNKNOWN:
++	default:
++		T(YAFFS_TRACE_VERIFY,
++		(TSTR("Obj %d has illegal type %d"TENDSTR),
++		obj->objectId,obj->variantType));	   
++		break;
++	}
++	
++	
++}
++
++static void yaffs_VerifyObjects(yaffs_Device *dev)
++{
++	yaffs_Object *obj;
++	int i;
++	struct list_head *lh;
++
++	if(yaffs_SkipVerification(dev))
++		return;
++	
++	/* Iterate through the objects in each hash entry */
++	 
++	 for(i = 0; i <  YAFFS_NOBJECT_BUCKETS; i++){
++	 	list_for_each(lh, &dev->objectBucket[i].list) {
++			if (lh) {
++				obj = list_entry(lh, yaffs_Object, hashLink);
++				yaffs_VerifyObject(obj);
++			}
++		}
++	 }
++
++}
++
++
++/*
++ *  Simple hash function. Needs to have a reasonable spread
++ */
++ 
++static Y_INLINE int yaffs_HashFunction(int n)
++{
++	n = abs(n);
++	return (n % YAFFS_NOBJECT_BUCKETS);
++}
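++
++/* Worked example (the bucket count comes from YAFFS_NOBJECT_BUCKETS; 256 is
++ * only assumed here for illustration): objectId 1000 hashes to bucket
++ * 1000 % 256 == 232, and a negative id such as -1000 is first folded to 1000
++ * by abs(), so it lands in the same bucket.
++ */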
++
++/*
++ * Access functions to useful fake objects
++ */
++ 
++yaffs_Object *yaffs_Root(yaffs_Device * dev)
++{
++	return dev->rootDir;
++}
++
++yaffs_Object *yaffs_LostNFound(yaffs_Device * dev)
++{
++	return dev->lostNFoundDir;
++}
++
++
++/*
++ *  Erased NAND checking functions
++ */
++ 
++int yaffs_CheckFF(__u8 * buffer, int nBytes)
++{
++	/* Horrible, slow implementation */
++	while (nBytes--) {
++		if (*buffer != 0xFF)
++			return 0;
++		buffer++;
++	}
++	return 1;
++}
++
++static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
++				  int chunkInNAND)
++{
++
++	int retval = YAFFS_OK;
++	__u8 *data = yaffs_GetTempBuffer(dev, __LINE__);
++	yaffs_ExtendedTags tags;
++	int result;
++
++	result = yaffs_ReadChunkWithTagsFromNAND(dev, chunkInNAND, data, &tags);
++	
++	if(tags.eccResult > YAFFS_ECC_RESULT_NO_ERROR)
++		retval = YAFFS_FAIL;
++		
++
++	if (!yaffs_CheckFF(data, dev->nDataBytesPerChunk) || tags.chunkUsed) {
++		T(YAFFS_TRACE_NANDACCESS,
++		  (TSTR("Chunk %d not erased" TENDSTR), chunkInNAND));
++		retval = YAFFS_FAIL;
++	}
++
++	yaffs_ReleaseTempBuffer(dev, data, __LINE__);
++
++	return retval;
++
++}
++
++
++static int yaffs_WriteNewChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
++					     const __u8 * data,
++					     yaffs_ExtendedTags * tags,
++					     int useReserve)
++{
++	int attempts = 0;
++	int writeOk = 0;
++	int chunk;
++
++	yaffs_InvalidateCheckpoint(dev);
++
++	do {
++		yaffs_BlockInfo *bi = 0;
++		int erasedOk = 0;
++
++		chunk = yaffs_AllocateChunk(dev, useReserve, &bi);
++		if (chunk < 0) {
++			/* no space */
++			break;
++		}
++
++		/* First check this chunk is erased, if it needs
++		 * checking.  The checking policy (unless forced
++		 * always on) is as follows:
++		 *
++		 * Check the first page we try to write in a block.
++		 * If the check passes then we don't need to check any
++		 * more.	If the check fails, we check again...
++		 * If the block has been erased, we don't need to check.
++		 *
++		 * However, if the block has been prioritised for gc,
++		 * then we think there might be something odd about
++		 * this block and stop using it.
++		 *
++		 * Rationale: We should only ever see chunks that have
++		 * not been erased if there was a partially written
++		 * chunk due to power loss.  This checking policy should
++		 * catch that case with very few checks and thus save a
++		 * lot of checks that are most likely not needed.
++		 */
++		if (bi->gcPrioritise) {
++			yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
++			/* try another chunk */
++			continue;
++		}
++
++		/* let's give it a try */
++		attempts++;
++
++#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
++		bi->skipErasedCheck = 0;
++#endif
++		if (!bi->skipErasedCheck) {
++			erasedOk = yaffs_CheckChunkErased(dev, chunk);
++			if (erasedOk != YAFFS_OK) {
++				T(YAFFS_TRACE_ERROR,
++				(TSTR ("**>> yaffs chunk %d was not erased"
++				TENDSTR), chunk));
++
++				/* try another chunk */
++				continue;
++			}
++			bi->skipErasedCheck = 1;
++		}
++
++		writeOk = yaffs_WriteChunkWithTagsToNAND(dev, chunk,
++				data, tags);
++		if (writeOk != YAFFS_OK) {
++			yaffs_HandleWriteChunkError(dev, chunk, erasedOk);
++			/* try another chunk */
++			continue;
++		}
++
++		/* Copy the data into the robustification buffer */
++		yaffs_HandleWriteChunkOk(dev, chunk, data, tags);
++
++	} while (writeOk != YAFFS_OK && attempts < yaffs_wr_attempts);
++
++	if (attempts > 1) {
++		T(YAFFS_TRACE_ERROR,
++			(TSTR("**>> yaffs write required %d attempts" TENDSTR),
++			attempts));
++
++		dev->nRetriedWrites += (attempts - 1);
++	}
++
++	return chunk;
++}
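++
++/* Illustrative walk-through of the erased-check policy above: after a power
++ * loss the first chunk tried in a block may fail yaffs_CheckChunkErased, so
++ * it is deleted and the allocator moves on to another chunk; once a chunk in
++ * the block passes the check, bi->skipErasedCheck suppresses further checks
++ * for that block.
++ */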
++
++/*
++ * Block retiring for handling a broken block.
++ */
++ 
++static void yaffs_RetireBlock(yaffs_Device * dev, int blockInNAND)
++{
++	yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
++
++	yaffs_InvalidateCheckpoint(dev);
++	
++	yaffs_MarkBlockBad(dev, blockInNAND);
++
++	bi->blockState = YAFFS_BLOCK_STATE_DEAD;
++	bi->gcPrioritise = 0;
++	bi->needsRetiring = 0;
++
++	dev->nRetiredBlocks++;
++}
++
++/*
++ * Functions for robustification (TODO)
++ *
++ */
++ 
++static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
++				     const __u8 * data,
++				     const yaffs_ExtendedTags * tags)
++{
++}
++
++static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
++				    const yaffs_ExtendedTags * tags)
++{
++}
++
++void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi)
++{
++	if(!bi->gcPrioritise){
++		bi->gcPrioritise = 1;
++		dev->hasPendingPrioritisedGCs = 1;
++		bi->chunkErrorStrikes ++;
++		
++		if(bi->chunkErrorStrikes > 3){
++			bi->needsRetiring = 1; /* Too many strikes, so retire this */
++			T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Block struck out" TENDSTR)));
++
++		}
++		
++	}
++}
++
++static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND, int erasedOk)
++{
++
++	int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
++	yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
++
++	yaffs_HandleChunkError(dev,bi);
++		
++	
++	if(erasedOk ) {
++		/* Was an actual write failure, so mark the block for retirement  */
++		bi->needsRetiring = 1;
++		T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++		  (TSTR("**>> Block %d needs retiring" TENDSTR), blockInNAND));
++
++		
++	}
++	
++	/* Delete the chunk */
++	yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
++}
++
++
++/*---------------- Name handling functions ------------*/ 
++
++static __u16 yaffs_CalcNameSum(const YCHAR * name)
++{
++	__u16 sum = 0;
++	__u16 i = 1;
++
++	YUCHAR *bname = (YUCHAR *) name;
++	if (bname) {
++		while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH/2))) {
++
++#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
++			sum += yaffs_toupper(*bname) * i;
++#else
++			sum += (*bname) * i;
++#endif
++			i++;
++			bname++;
++		}
++	}
++	return sum;
++}
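++
++/* Worked example (assuming ASCII and CONFIG_YAFFS_CASE_INSENSITIVE unset):
++ * for the name "ab" the sum is 'a'*1 + 'b'*2 == 97 + 196 == 293; two names
++ * with different sums can then be told apart without a full string compare.
++ */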
++
++static void yaffs_SetObjectName(yaffs_Object * obj, const YCHAR * name)
++{
++#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
++	if (name && yaffs_strlen(name) <= YAFFS_SHORT_NAME_LENGTH) {
++		yaffs_strcpy(obj->shortName, name);
++	} else {
++		obj->shortName[0] = _Y('\0');
++	}
++#endif
++	obj->sum = yaffs_CalcNameSum(name);
++}
++
++/*-------------------- TNODES -------------------
++ *
++ * List of spare tnodes
++ * The list is hooked together using the first pointer
++ * in the tnode.
++ */
++ 
++/* yaffs_CreateTnodes creates a bunch more tnodes and
++ * adds them to the tnode free list.
++ * Don't use this function directly
++ */
++
++static int yaffs_CreateTnodes(yaffs_Device * dev, int nTnodes)
++{
++	int i;
++	int tnodeSize;
++	yaffs_Tnode *newTnodes;
++	__u8 *mem;
++	yaffs_Tnode *curr;
++	yaffs_Tnode *next;
++	yaffs_TnodeList *tnl;
++
++	if (nTnodes < 1)
++		return YAFFS_OK;
++		
++	/* Calculate the tnode size in bytes for variable width tnode support.
++	 * Must be a multiple of 32-bits  */
++	tnodeSize = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++
++	/* make these things */
++
++	newTnodes = YMALLOC(nTnodes * tnodeSize);
++	mem = (__u8 *)newTnodes;
++
++	if (!newTnodes) {
++		T(YAFFS_TRACE_ERROR,
++		  (TSTR("yaffs: Could not allocate Tnodes" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++
++	/* Hook them into the free list */
++#if 0
++	for (i = 0; i < nTnodes - 1; i++) {
++		newTnodes[i].internal[0] = &newTnodes[i + 1];
++#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
++		newTnodes[i].internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
++#endif
++	}
++
++	newTnodes[nTnodes - 1].internal[0] = dev->freeTnodes;
++#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
++	newTnodes[nTnodes - 1].internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
++#endif
++	dev->freeTnodes = newTnodes;
++#else
++	/* New hookup for wide tnodes */
++	for(i = 0; i < nTnodes -1; i++) {
++		curr = (yaffs_Tnode *) &mem[i * tnodeSize];
++		next = (yaffs_Tnode *) &mem[(i+1) * tnodeSize];
++		curr->internal[0] = next;
++	}
++	
++	curr = (yaffs_Tnode *) &mem[(nTnodes - 1) * tnodeSize];
++	curr->internal[0] = dev->freeTnodes;
++	dev->freeTnodes = (yaffs_Tnode *)mem;
++
++#endif
++
++
++	dev->nFreeTnodes += nTnodes;
++	dev->nTnodesCreated += nTnodes;
++
++	/* Now add this bunch of tnodes to a list for freeing up.
++	 * NB If we can't add this to the management list it isn't fatal
++	 * but it just means we can't free this bunch of tnodes later.
++	 */
++	 
++	tnl = YMALLOC(sizeof(yaffs_TnodeList));
++	if (!tnl) {
++		T(YAFFS_TRACE_ERROR,
++		  (TSTR
++		   ("yaffs: Could not add tnodes to management list" TENDSTR)));
++		   return YAFFS_FAIL;
++
++	} else {
++		tnl->tnodes = newTnodes;
++		tnl->next = dev->allocatedTnodeList;
++		dev->allocatedTnodeList = tnl;
++	}
++
++	T(YAFFS_TRACE_ALLOCATE, (TSTR("yaffs: Tnodes added" TENDSTR)));
++
++	return YAFFS_OK;
++}
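++
++/* Worked example (tnodeWidth == 16 is only assumed for illustration): each
++ * tnode then occupies (16 * YAFFS_NTNODES_LEVEL0)/8 bytes, the whole batch is
++ * carved out of one contiguous YMALLOC() area, and the pieces are chained
++ * together through internal[0] to form the free list.
++ */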
++
++/* GetTnode gets us a clean tnode. Tries to allocate more if we run out */
++
++static yaffs_Tnode *yaffs_GetTnodeRaw(yaffs_Device * dev)
++{
++	yaffs_Tnode *tn = NULL;
++
++	/* If there are none left make more */
++	if (!dev->freeTnodes) {
++		yaffs_CreateTnodes(dev, YAFFS_ALLOCATION_NTNODES);
++	}
++
++	if (dev->freeTnodes) {
++		tn = dev->freeTnodes;
++#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
++		if (tn->internal[YAFFS_NTNODES_INTERNAL] != (void *)1) {
++			/* Hoosterman, this thing looks like it isn't in the list */
++			T(YAFFS_TRACE_ALWAYS,
++			  (TSTR("yaffs: Tnode list bug 1" TENDSTR)));
++		}
++#endif
++		dev->freeTnodes = dev->freeTnodes->internal[0];
++		dev->nFreeTnodes--;
++	}
++
++	return tn;
++}
++
++static yaffs_Tnode *yaffs_GetTnode(yaffs_Device * dev)
++{
++	yaffs_Tnode *tn = yaffs_GetTnodeRaw(dev);
++	
++	if(tn)
++		memset(tn, 0, (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
++
++	return tn;	
++}
++
++/* FreeTnode frees up a tnode and puts it back on the free list */
++static void yaffs_FreeTnode(yaffs_Device * dev, yaffs_Tnode * tn)
++{
++	if (tn) {
++#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
++		if (tn->internal[YAFFS_NTNODES_INTERNAL] != 0) {
++			/* Hoosterman, this thing looks like it is already in the list */
++			T(YAFFS_TRACE_ALWAYS,
++			  (TSTR("yaffs: Tnode list bug 2" TENDSTR)));
++		}
++		tn->internal[YAFFS_NTNODES_INTERNAL] = (void *)1;
++#endif
++		tn->internal[0] = dev->freeTnodes;
++		dev->freeTnodes = tn;
++		dev->nFreeTnodes++;
++	}
++}
++
++static void yaffs_DeinitialiseTnodes(yaffs_Device * dev)
++{
++	/* Free the list of allocated tnodes */
++	yaffs_TnodeList *tmp;
++
++	while (dev->allocatedTnodeList) {
++		tmp = dev->allocatedTnodeList->next;
++
++		YFREE(dev->allocatedTnodeList->tnodes);
++		YFREE(dev->allocatedTnodeList);
++		dev->allocatedTnodeList = tmp;
++
++	}
++
++	dev->freeTnodes = NULL;
++	dev->nFreeTnodes = 0;
++}
++
++static void yaffs_InitialiseTnodes(yaffs_Device * dev)
++{
++	dev->allocatedTnodeList = NULL;
++	dev->freeTnodes = NULL;
++	dev->nFreeTnodes = 0;
++	dev->nTnodesCreated = 0;
++
++}
++
++
++void yaffs_PutLevel0Tnode(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos, unsigned val)
++{
++  __u32 *map = (__u32 *)tn;
++  __u32 bitInMap;
++  __u32 bitInWord;
++  __u32 wordInMap;
++  __u32 mask;
++  
++  pos &= YAFFS_TNODES_LEVEL0_MASK;
++  val >>= dev->chunkGroupBits;
++  
++  bitInMap = pos * dev->tnodeWidth;
++  wordInMap = bitInMap /32;
++  bitInWord = bitInMap & (32 -1);
++  
++  mask = dev->tnodeMask << bitInWord;
++  
++  map[wordInMap] &= ~mask;
++  map[wordInMap] |= (mask & (val << bitInWord));
++  
++  if(dev->tnodeWidth > (32-bitInWord)) {
++    bitInWord = (32 - bitInWord);
++    wordInMap++;
++    mask = dev->tnodeMask >> (/*dev->tnodeWidth -*/ bitInWord);
++    map[wordInMap] &= ~mask;
++    map[wordInMap] |= (mask & (val >> bitInWord));
++  }
++}
++
++static __u32 yaffs_GetChunkGroupBase(yaffs_Device *dev, yaffs_Tnode *tn, unsigned pos)
++{
++  __u32 *map = (__u32 *)tn;
++  __u32 bitInMap;
++  __u32 bitInWord;
++  __u32 wordInMap;
++  __u32 val;
++  
++  pos &= YAFFS_TNODES_LEVEL0_MASK;
++  
++  bitInMap = pos * dev->tnodeWidth;
++  wordInMap = bitInMap /32;
++  bitInWord = bitInMap & (32 -1);
++  
++  val = map[wordInMap] >> bitInWord;
++  
++  if(dev->tnodeWidth > (32-bitInWord)) {
++    bitInWord = (32 - bitInWord);
++    wordInMap++;
++    val |= (map[wordInMap] << bitInWord);
++  }
++  
++  val &= dev->tnodeMask;
++  val <<= dev->chunkGroupBits;
++  
++  return val;
++}
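++
++/* Worked example for the packed level-0 entries above (tnodeWidth == 18 and
++ * chunkGroupBits == 0 are only assumed for illustration): entry pos == 3
++ * starts at bit 3*18 == 54, ie wordInMap == 1 and bitInWord == 22. Since
++ * 18 > (32 - 22), the value straddles a word boundary: yaffs_PutLevel0Tnode
++ * writes the low 10 bits into map[1] and the high 8 bits into map[2], and
++ * yaffs_GetChunkGroupBase reassembles them in exactly the reverse way.
++ */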
++
++/* ------------------- End of individual tnode manipulation -----------------*/
++
++/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
++ * The look up tree is represented by the top tnode and the number of topLevel
++ * in the tree. 0 means only the level 0 tnode is in the tree.
++ */
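++
++/* Worked example (assuming the usual 16-entry level-0 tnodes and 8-way
++ * internal tnodes, ie YAFFS_TNODES_LEVEL0_BITS == 4 and
++ * YAFFS_TNODES_INTERNAL_BITS == 3): chunkId 100 gives 100 >> 4 == 6, so one
++ * internal level is needed (topLevel >= 1); chunkId 5000 gives
++ * 5000 >> 4 == 312, then 312 >> 3 == 39, 39 >> 3 == 4 and 4 >> 3 == 0, so
++ * topLevel must be at least 3.
++ */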
++
++/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
++static yaffs_Tnode *yaffs_FindLevel0Tnode(yaffs_Device * dev,
++					  yaffs_FileStructure * fStruct,
++					  __u32 chunkId)
++{
++
++	yaffs_Tnode *tn = fStruct->top;
++	__u32 i;
++	int requiredTallness;
++	int level = fStruct->topLevel;
++
++	/* Check sane level and chunk Id */
++	if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL) {
++		return NULL;
++	}
++
++	if (chunkId > YAFFS_MAX_CHUNK_ID) {
++		return NULL;
++	}
++
++	/* First check we're tall enough (ie enough topLevel) */
++
++	i = chunkId >> YAFFS_TNODES_LEVEL0_BITS;
++	requiredTallness = 0;
++	while (i) {
++		i >>= YAFFS_TNODES_INTERNAL_BITS;
++		requiredTallness++;
++	}
++
++	if (requiredTallness > fStruct->topLevel) {
++		/* Not tall enough, so we can't find it, return NULL. */
++		return NULL;
++	}
++
++	/* Traverse down to level 0 */
++	while (level > 0 && tn) {
++		tn = tn->
++		    internal[(chunkId >>
++			       ( YAFFS_TNODES_LEVEL0_BITS + 
++			         (level - 1) *
++			         YAFFS_TNODES_INTERNAL_BITS)
++			      ) &
++			     YAFFS_TNODES_INTERNAL_MASK];
++		level--;
++
++	}
++
++	return tn;
++}
++
++/* AddOrFindLevel0Tnode finds the level 0 tnode if it exists, otherwise first expands the tree.
++ * This happens in two steps:
++ *  1. If the tree isn't tall enough, then make it taller.
++ *  2. Scan down the tree towards the level 0 tnode adding tnodes if required.
++ *
++ * Used when modifying the tree.
++ *
++ *  If the passedTn argument is NULL, then a fresh tnode will be added; otherwise the specified
++ *  tnode will be plugged into the tree.
++ */
++ 
++static yaffs_Tnode *yaffs_AddOrFindLevel0Tnode(yaffs_Device * dev,
++					       yaffs_FileStructure * fStruct,
++					       __u32 chunkId,
++					       yaffs_Tnode *passedTn)
++{
++
++	int requiredTallness;
++	int i;
++	int l;
++	yaffs_Tnode *tn;
++
++	__u32 x;
++
++
++	/* Check sane level and page Id */
++	if (fStruct->topLevel < 0 || fStruct->topLevel > YAFFS_TNODES_MAX_LEVEL) {
++		return NULL;
++	}
++
++	if (chunkId > YAFFS_MAX_CHUNK_ID) {
++		return NULL;
++	}
++
++	/* First check we're tall enough (ie enough topLevel) */
++
++	x = chunkId >> YAFFS_TNODES_LEVEL0_BITS;
++	requiredTallness = 0;
++	while (x) {
++		x >>= YAFFS_TNODES_INTERNAL_BITS;
++		requiredTallness++;
++	}
++
++
++	if (requiredTallness > fStruct->topLevel) {
++		/* Not tall enough, so we have to make the tree taller */
++		for (i = fStruct->topLevel; i < requiredTallness; i++) {
++		
++			tn = yaffs_GetTnode(dev);
++
++			if (tn) {
++				tn->internal[0] = fStruct->top;
++				fStruct->top = tn;
++			} else {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR("yaffs: no more tnodes" TENDSTR)));
++			}
++		}
++
++		fStruct->topLevel = requiredTallness;
++	}
++
++	/* Traverse down to level 0, adding anything we need */
++
++	l = fStruct->topLevel;
++	tn = fStruct->top;
++	
++	if(l > 0) {
++		while (l > 0 && tn) {
++			x = (chunkId >>
++			     ( YAFFS_TNODES_LEVEL0_BITS +
++			      (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
++			    YAFFS_TNODES_INTERNAL_MASK;
++
++
++			if((l>1) && !tn->internal[x]){
++				/* Add missing non-level-zero tnode */
++				tn->internal[x] = yaffs_GetTnode(dev);
++
++			} else if(l == 1) {
++				/* Looking from level 1 at level 0 */
++			 	if (passedTn) {
++					/* If we already have one, then release it.*/
++					if(tn->internal[x])
++						yaffs_FreeTnode(dev,tn->internal[x]);
++					tn->internal[x] = passedTn;
++			
++				} else if(!tn->internal[x]) {
++					/* Don't have one, none passed in */
++					tn->internal[x] = yaffs_GetTnode(dev);
++				}
++			}
++		
++			tn = tn->internal[x];
++			l--;
++		}
++	} else {
++		/* top is level 0 */
++		if(passedTn) {
++			memcpy(tn,passedTn,(dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
++			yaffs_FreeTnode(dev,passedTn);
++		}
++	}
++
++	return tn;
++}
++
++static int yaffs_FindChunkInGroup(yaffs_Device * dev, int theChunk,
++				  yaffs_ExtendedTags * tags, int objectId,
++				  int chunkInInode)
++{
++	int j;
++
++	for (j = 0; theChunk && j < dev->chunkGroupSize; j++) {
++		if (yaffs_CheckChunkBit
++		    (dev, theChunk / dev->nChunksPerBlock,
++		     theChunk % dev->nChunksPerBlock)) {
++			yaffs_ReadChunkWithTagsFromNAND(dev, theChunk, NULL,
++							tags);
++			if (yaffs_TagsMatch(tags, objectId, chunkInInode)) {
++				/* found it; */
++				return theChunk;
++
++			}
++		}
++		theChunk++;
++	}
++	return -1;
++}
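++
++/* Illustrative note (chunkGroupSize == 4 is only assumed as an example): when
++ * tnodeWidth is too narrow to address every chunk directly, a level-0 entry
++ * only records the base of a group of 4 consecutive chunks, and the loop
++ * above reads the tags of each live candidate until objectId and chunkInInode
++ * both match.
++ */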
++
++
++/* DeleteWorker scans backwards through the tnode tree and deletes all the
++ * chunks and tnodes in the file
++ * Returns 1 if the tree was deleted. 
++ * Returns 0 if it stopped early due to hitting the limit and the delete is incomplete.
++ */
++
++static int yaffs_DeleteWorker(yaffs_Object * in, yaffs_Tnode * tn, __u32 level,
++			      int chunkOffset, int *limit)
++{
++	int i;
++	int chunkInInode;
++	int theChunk;
++	yaffs_ExtendedTags tags;
++	int foundChunk;
++	yaffs_Device *dev = in->myDev;
++
++	int allDone = 1;
++
++	if (tn) {
++		if (level > 0) {
++
++			for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
++			     i--) {
++				if (tn->internal[i]) {
++					if (limit && (*limit) < 0) {
++						allDone = 0;
++					} else {
++						allDone =
++						    yaffs_DeleteWorker(in,
++								       tn->
++								       internal
++								       [i],
++								       level -
++								       1,
++								       (chunkOffset
++									<<
++									YAFFS_TNODES_INTERNAL_BITS)
++								       + i,
++								       limit);
++					}
++					if (allDone) {
++						yaffs_FreeTnode(dev,
++								tn->
++								internal[i]);
++						tn->internal[i] = NULL;
++					}
++				}
++
++			}
++			return (allDone) ? 1 : 0;
++		} else if (level == 0) {
++			int hitLimit = 0;
++
++			for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0 && !hitLimit;
++			     i--) {
++			        theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
++				if (theChunk) {
++
++					chunkInInode =
++					    (chunkOffset <<
++					     YAFFS_TNODES_LEVEL0_BITS) + i;
++
++					foundChunk =
++					    yaffs_FindChunkInGroup(dev,
++								   theChunk,
++								   &tags,
++								   in->objectId,
++								   chunkInInode);
++
++					if (foundChunk > 0) {
++						yaffs_DeleteChunk(dev,
++								  foundChunk, 1,
++								  __LINE__);
++						in->nDataChunks--;
++						if (limit) {
++							*limit = *limit - 1;
++							if (*limit <= 0) {
++								hitLimit = 1;
++							}
++						}
++
++					}
++
++					yaffs_PutLevel0Tnode(dev,tn,i,0);
++				}
++
++			}
++			return (i < 0) ? 1 : 0;
++
++		}
++
++	}
++
++	return 1;
++
++}
++
++static void yaffs_SoftDeleteChunk(yaffs_Device * dev, int chunk)
++{
++
++	yaffs_BlockInfo *theBlock;
++
++	T(YAFFS_TRACE_DELETION, (TSTR("soft delete chunk %d" TENDSTR), chunk));
++
++	theBlock = yaffs_GetBlockInfo(dev, chunk / dev->nChunksPerBlock);
++	if (theBlock) {
++		theBlock->softDeletions++;
++		dev->nFreeChunks++;
++	}
++}
++
++/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all the chunks in the file.
++ * All soft deleting does is increment the block's soft-delete count and pull the chunk out
++ * of the tnode.
++ * Thus, essentially this is the same as DeleteWorker except that the chunks are soft deleted.
++ */
++ 
++static int yaffs_SoftDeleteWorker(yaffs_Object * in, yaffs_Tnode * tn,
++				  __u32 level, int chunkOffset)
++{
++	int i;
++	int theChunk;
++	int allDone = 1;
++	yaffs_Device *dev = in->myDev;
++
++	if (tn) {
++		if (level > 0) {
++
++			for (i = YAFFS_NTNODES_INTERNAL - 1; allDone && i >= 0;
++			     i--) {
++				if (tn->internal[i]) {
++					allDone =
++					    yaffs_SoftDeleteWorker(in,
++								   tn->
++								   internal[i],
++								   level - 1,
++								   (chunkOffset
++								    <<
++								    YAFFS_TNODES_INTERNAL_BITS)
++								   + i);
++					if (allDone) {
++						yaffs_FreeTnode(dev,
++								tn->
++								internal[i]);
++						tn->internal[i] = NULL;
++					} else {
++						/* Hoosterman... how could this happen? */
++					}
++				}
++			}
++			return (allDone) ? 1 : 0;
++		} else if (level == 0) {
++
++			for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
++				theChunk = yaffs_GetChunkGroupBase(dev,tn,i);
++				if (theChunk) {
++					/* Note this does not find the real chunk, only the chunk group.
++					 * We make an assumption that a chunk group is not larger than 
++					 * a block.
++					 */
++					yaffs_SoftDeleteChunk(dev, theChunk);
++					yaffs_PutLevel0Tnode(dev,tn,i,0);
++				}
++
++			}
++			return 1;
++
++		}
++
++	}
++
++	return 1;
++
++}
++
++static void yaffs_SoftDeleteFile(yaffs_Object * obj)
++{
++	if (obj->deleted &&
++	    obj->variantType == YAFFS_OBJECT_TYPE_FILE && !obj->softDeleted) {
++		if (obj->nDataChunks <= 0) {
++			/* Empty file with no duplicate object headers, just delete it immediately */
++			yaffs_FreeTnode(obj->myDev,
++					obj->variant.fileVariant.top);
++			obj->variant.fileVariant.top = NULL;
++			T(YAFFS_TRACE_TRACING,
++			  (TSTR("yaffs: Deleting empty file %d" TENDSTR),
++			   obj->objectId));
++			yaffs_DoGenericObjectDeletion(obj);
++		} else {
++			yaffs_SoftDeleteWorker(obj,
++					       obj->variant.fileVariant.top,
++					       obj->variant.fileVariant.
++					       topLevel, 0);
++			obj->softDeleted = 1;
++		}
++	}
++}
++
++/* Pruning removes any part of the file structure tree that is beyond the
++ * bounds of the file (ie that does not point to chunks).
++ *
++ * A file should only get pruned when its size is reduced.
++ *
++ * Before pruning, the chunks must be pulled from the tree and the
++ * level 0 tnode entries must be zeroed out.
++ * Could also use this for file deletion, but that's probably better handled
++ * by a special case.
++ */
++
++static yaffs_Tnode *yaffs_PruneWorker(yaffs_Device * dev, yaffs_Tnode * tn,
++				      __u32 level, int del0)
++{
++	int i;
++	int hasData;
++
++	if (tn) {
++		hasData = 0;
++
++		for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
++			if (tn->internal[i] && level > 0) {
++				tn->internal[i] =
++				    yaffs_PruneWorker(dev, tn->internal[i],
++						      level - 1,
++						      (i == 0) ? del0 : 1);
++			}
++
++			if (tn->internal[i]) {
++				hasData++;
++			}
++		}
++
++		if (hasData == 0 && del0) {
++			/* Free and return NULL */
++
++			yaffs_FreeTnode(dev, tn);
++			tn = NULL;
++		}
++
++	}
++
++	return tn;
++
++}
++
++static int yaffs_PruneFileStructure(yaffs_Device * dev,
++				    yaffs_FileStructure * fStruct)
++{
++	int i;
++	int hasData;
++	int done = 0;
++	yaffs_Tnode *tn;
++
++	if (fStruct->topLevel > 0) {
++		fStruct->top =
++		    yaffs_PruneWorker(dev, fStruct->top, fStruct->topLevel, 0);
++
++		/* Now we have a tree with all the empty branches pruned to NULL, but the
++		 * height is the same as it was.
++		 * Let's see if we can trim internal tnodes to shorten the tree.
++		 * We can do this if only the 0th entry in the top tnode is in use
++		 * (ie all the other entries are NULL).
++		 */
++
++		while (fStruct->topLevel && !done) {
++			tn = fStruct->top;
++
++			hasData = 0;
++			for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
++				if (tn->internal[i]) {
++					hasData++;
++				}
++			}
++
++			if (!hasData) {
++				fStruct->top = tn->internal[0];
++				fStruct->topLevel--;
++				yaffs_FreeTnode(dev, tn);
++			} else {
++				done = 1;
++			}
++		}
++	}
++
++	return YAFFS_OK;
++}
++
++/*-------------------- End of File Structure functions.-------------------*/
++
++/* yaffs_CreateFreeObjects creates a bunch more objects and
++ * adds them to the object free list.
++ */
++static int yaffs_CreateFreeObjects(yaffs_Device * dev, int nObjects)
++{
++	int i;
++	yaffs_Object *newObjects;
++	yaffs_ObjectList *list;
++
++	if (nObjects < 1)
++		return YAFFS_OK;
++
++	/* make these things */
++	newObjects = YMALLOC(nObjects * sizeof(yaffs_Object));
++	list = YMALLOC(sizeof(yaffs_ObjectList));
++
++	if (!newObjects || !list) {
++		if(newObjects)
++			YFREE(newObjects);
++		if(list)
++			YFREE(list);
++		T(YAFFS_TRACE_ALLOCATE,
++		  (TSTR("yaffs: Could not allocate more objects" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++	
++	/* Hook them into the free list */
++	for (i = 0; i < nObjects - 1; i++) {
++		newObjects[i].siblings.next =
++		    (struct list_head *)(&newObjects[i + 1]);
++	}
++
++	newObjects[nObjects - 1].siblings.next = (void *)dev->freeObjects;
++	dev->freeObjects = newObjects;
++	dev->nFreeObjects += nObjects;
++	dev->nObjectsCreated += nObjects;
++
++	/* Now add this bunch of Objects to a list for freeing up. */
++
++	list->objects = newObjects;
++	list->next = dev->allocatedObjectList;
++	dev->allocatedObjectList = list;
++
++	return YAFFS_OK;
++}
++
++
++/* AllocateEmptyObject gets us a clean Object. Tries to allocate more if we run out */
++static yaffs_Object *yaffs_AllocateEmptyObject(yaffs_Device * dev)
++{
++	yaffs_Object *tn = NULL;
++
++	/* If there are none left make more */
++	if (!dev->freeObjects) {
++		yaffs_CreateFreeObjects(dev, YAFFS_ALLOCATION_NOBJECTS);
++	}
++
++	if (dev->freeObjects) {
++		tn = dev->freeObjects;
++		dev->freeObjects =
++		    (yaffs_Object *) (dev->freeObjects->siblings.next);
++		dev->nFreeObjects--;
++
++		/* Now sweeten it up... */
++
++		memset(tn, 0, sizeof(yaffs_Object));
++		tn->myDev = dev;
++		tn->chunkId = -1;
++		tn->variantType = YAFFS_OBJECT_TYPE_UNKNOWN;
++		INIT_LIST_HEAD(&(tn->hardLinks));
++		INIT_LIST_HEAD(&(tn->hashLink));
++		INIT_LIST_HEAD(&tn->siblings);
++
++		/* Add it to the lost and found directory.
++		 * NB Can't put root or lostNFound in lostNFound so
++		 * check if lostNFound exists first
++		 */
++		if (dev->lostNFoundDir) {
++			yaffs_AddObjectToDirectory(dev->lostNFoundDir, tn);
++		}
++	}
++
++	return tn;
++}
++
++static yaffs_Object *yaffs_CreateFakeDirectory(yaffs_Device * dev, int number,
++					       __u32 mode)
++{
++
++	yaffs_Object *obj =
++	    yaffs_CreateNewObject(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
++	if (obj) {
++		obj->fake = 1;		/* it is fake so it has no NAND presence... */
++		obj->renameAllowed = 0;	/* ... and we're not allowed to rename it... */
++		obj->unlinkAllowed = 0;	/* ... or unlink it */
++		obj->deleted = 0;
++		obj->unlinked = 0;
++		obj->yst_mode = mode;
++		obj->myDev = dev;
++		obj->chunkId = 0;	/* Not a valid chunk. */
++	}
++
++	return obj;
++
++}
++
++static void yaffs_UnhashObject(yaffs_Object * tn)
++{
++	int bucket;
++	yaffs_Device *dev = tn->myDev;
++
++	/* If it is still linked into the bucket list, free from the list */
++	if (!list_empty(&tn->hashLink)) {
++		list_del_init(&tn->hashLink);
++		bucket = yaffs_HashFunction(tn->objectId);
++		dev->objectBucket[bucket].count--;
++	}
++
++}
++
++/*  FreeObject frees up a Object and puts it back on the free list */
++static void yaffs_FreeObject(yaffs_Object * tn)
++{
++
++	yaffs_Device *dev = tn->myDev;
++
++#ifdef  __KERNEL__
++	if (tn->myInode) {
++		/* We're still hooked up to a cached inode.
++		 * Don't delete now, but mark for later deletion
++		 */
++		tn->deferedFree = 1;
++		return;
++	}
++#endif
++
++	yaffs_UnhashObject(tn);
++
++	/* Link into the free list. */
++	tn->siblings.next = (struct list_head *)(dev->freeObjects);
++	dev->freeObjects = tn;
++	dev->nFreeObjects++;
++}
++
++#ifdef __KERNEL__
++
++void yaffs_HandleDeferedFree(yaffs_Object * obj)
++{
++	if (obj->deferedFree) {
++		yaffs_FreeObject(obj);
++	}
++}
++
++#endif
++
++static void yaffs_DeinitialiseObjects(yaffs_Device * dev)
++{
++	/* Free the list of allocated Objects */
++
++	yaffs_ObjectList *tmp;
++
++	while (dev->allocatedObjectList) {
++		tmp = dev->allocatedObjectList->next;
++		YFREE(dev->allocatedObjectList->objects);
++		YFREE(dev->allocatedObjectList);
++
++		dev->allocatedObjectList = tmp;
++	}
++
++	dev->freeObjects = NULL;
++	dev->nFreeObjects = 0;
++}
++
++static void yaffs_InitialiseObjects(yaffs_Device * dev)
++{
++	int i;
++
++	dev->allocatedObjectList = NULL;
++	dev->freeObjects = NULL;
++	dev->nFreeObjects = 0;
++
++	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++		INIT_LIST_HEAD(&dev->objectBucket[i].list);
++		dev->objectBucket[i].count = 0;
++	}
++
++}
++
++static int yaffs_FindNiceObjectBucket(yaffs_Device * dev)
++{
++	static int x = 0;
++	int i;
++	int l = 999;
++	int lowest = 999999;
++
++	/* First let's see if we can find one that's empty. */
++
++	for (i = 0; i < 10 && lowest > 0; i++) {
++		x++;
++		x %= YAFFS_NOBJECT_BUCKETS;
++		if (dev->objectBucket[x].count < lowest) {
++			lowest = dev->objectBucket[x].count;
++			l = x;
++		}
++
++	}
++
++	/* If we didn't find an empty list, then try
++	 * looking a bit further for a short one
++	 */
++
++	for (i = 0; i < 10 && lowest > 3; i++) {
++		x++;
++		x %= YAFFS_NOBJECT_BUCKETS;
++		if (dev->objectBucket[x].count < lowest) {
++			lowest = dev->objectBucket[x].count;
++			l = x;
++		}
++
++	}
++
++	return l;
++}
++
++static int yaffs_CreateNewObjectNumber(yaffs_Device * dev)
++{
++	int bucket = yaffs_FindNiceObjectBucket(dev);
++
++	/* Now find an object value that has not already been taken
++	 * by scanning the list.
++	 */
++
++	int found = 0;
++	struct list_head *i;
++
++	__u32 n = (__u32) bucket;
++
++	/* yaffs_CheckObjectHashSanity();  */
++
++	while (!found) {
++		found = 1;
++		n += YAFFS_NOBJECT_BUCKETS;
++		if (1 || dev->objectBucket[bucket].count > 0) {
++			list_for_each(i, &dev->objectBucket[bucket].list) {
++				/* If there is already one in the list */
++				if (i
++				    && list_entry(i, yaffs_Object,
++						  hashLink)->objectId == n) {
++					found = 0;
++				}
++			}
++		}
++	}
++
++
++	return n;
++}
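++
++/* Worked example (256 buckets assumed only for illustration): starting from
++ * bucket 7, the candidate ids are 7 + 256 == 263, then 519, 775, ... Every
++ * candidate hashes back to bucket 7, so only that one bucket list has to be
++ * scanned for collisions.
++ */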
++
++static void yaffs_HashObject(yaffs_Object * in)
++{
++	int bucket = yaffs_HashFunction(in->objectId);
++	yaffs_Device *dev = in->myDev;
++
++	list_add(&in->hashLink, &dev->objectBucket[bucket].list);
++	dev->objectBucket[bucket].count++;
++
++}
++
++yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device * dev, __u32 number)
++{
++	int bucket = yaffs_HashFunction(number);
++	struct list_head *i;
++	yaffs_Object *in;
++
++	list_for_each(i, &dev->objectBucket[bucket].list) {
++		/* Look if it is in the list */
++		if (i) {
++			in = list_entry(i, yaffs_Object, hashLink);
++			if (in->objectId == number) {
++#ifdef __KERNEL__
++				/* Don't tell the VFS about this one if it is defered free */
++				if (in->deferedFree)
++					return NULL;
++#endif
++
++				return in;
++			}
++		}
++	}
++
++	return NULL;
++}
++
++yaffs_Object *yaffs_CreateNewObject(yaffs_Device * dev, int number,
++				    yaffs_ObjectType type)
++{
++
++	yaffs_Object *theObject;
++	yaffs_Tnode *tn;
++
++	if (number < 0) {
++		number = yaffs_CreateNewObjectNumber(dev);
++	}
++
++	theObject = yaffs_AllocateEmptyObject(dev);
++	if(!theObject)
++		return NULL;
++		
++	if(type == YAFFS_OBJECT_TYPE_FILE){
++		tn = yaffs_GetTnode(dev);
++		if(!tn){
++			yaffs_FreeObject(theObject);
++			return NULL;
++		}
++	}
++		
++	
++
++	if (theObject) {
++		theObject->fake = 0;
++		theObject->renameAllowed = 1;
++		theObject->unlinkAllowed = 1;
++		theObject->objectId = number;
++		yaffs_HashObject(theObject);
++		theObject->variantType = type;
++#ifdef CONFIG_YAFFS_WINCE
++		yfsd_WinFileTimeNow(theObject->win_atime);
++		theObject->win_ctime[0] = theObject->win_mtime[0] =
++		    theObject->win_atime[0];
++		theObject->win_ctime[1] = theObject->win_mtime[1] =
++		    theObject->win_atime[1];
++
++#else
++
++		theObject->yst_atime = theObject->yst_mtime =
++		    theObject->yst_ctime = Y_CURRENT_TIME;
++#endif
++		switch (type) {
++		case YAFFS_OBJECT_TYPE_FILE:
++			theObject->variant.fileVariant.fileSize = 0;
++			theObject->variant.fileVariant.scannedFileSize = 0;
++			theObject->variant.fileVariant.shrinkSize = 0xFFFFFFFF;	/* max __u32 */
++			theObject->variant.fileVariant.topLevel = 0;
++			theObject->variant.fileVariant.top = tn;
++			break;
++		case YAFFS_OBJECT_TYPE_DIRECTORY:
++			INIT_LIST_HEAD(&theObject->variant.directoryVariant.
++				       children);
++			break;
++		case YAFFS_OBJECT_TYPE_SYMLINK:
++		case YAFFS_OBJECT_TYPE_HARDLINK:
++		case YAFFS_OBJECT_TYPE_SPECIAL:
++			/* No action required */
++			break;
++		case YAFFS_OBJECT_TYPE_UNKNOWN:
++			/* todo this should not happen */
++			break;
++		}
++	}
++
++	return theObject;
++}
++
++static yaffs_Object *yaffs_FindOrCreateObjectByNumber(yaffs_Device * dev,
++						      int number,
++						      yaffs_ObjectType type)
++{
++	yaffs_Object *theObject = NULL;
++
++	if (number > 0) {
++		theObject = yaffs_FindObjectByNumber(dev, number);
++	}
++
++	if (!theObject) {
++		theObject = yaffs_CreateNewObject(dev, number, type);
++	}
++
++	return theObject;
++
++}
++			
++
++static YCHAR *yaffs_CloneString(const YCHAR * str)
++{
++	YCHAR *newStr = NULL;
++
++	if (str && *str) {
++		newStr = YMALLOC((yaffs_strlen(str) + 1) * sizeof(YCHAR));
++		if(newStr)
++			yaffs_strcpy(newStr, str);
++	}
++
++	return newStr;
++
++}
++
++/*
++ * Mknod (create) a new object.
++ * equivalentObject only has meaning for a hard link;
++ * aliasString only has meaning for a symlink.
++ * rdev only has meaning for devices (a subset of special objects)
++ */
++ 
++static yaffs_Object *yaffs_MknodObject(yaffs_ObjectType type,
++				       yaffs_Object * parent,
++				       const YCHAR * name,
++				       __u32 mode,
++				       __u32 uid,
++				       __u32 gid,
++				       yaffs_Object * equivalentObject,
++				       const YCHAR * aliasString, __u32 rdev)
++{
++	yaffs_Object *in;
++	YCHAR *str;
++
++	yaffs_Device *dev = parent->myDev;
++
++	/* Check if the entry exists. If it does then fail the call since we don't want a dup.*/
++	if (yaffs_FindObjectByName(parent, name)) {
++		return NULL;
++	}
++
++	in = yaffs_CreateNewObject(dev, -1, type);
++	
++	if(type == YAFFS_OBJECT_TYPE_SYMLINK){
++		str = yaffs_CloneString(aliasString);
++		if(!str){
++			yaffs_FreeObject(in);
++			return NULL;
++		}
++	}
++	
++	
++
++	if (in) {
++		in->chunkId = -1;
++		in->valid = 1;
++		in->variantType = type;
++
++		in->yst_mode = mode;
++
++#ifdef CONFIG_YAFFS_WINCE
++		yfsd_WinFileTimeNow(in->win_atime);
++		in->win_ctime[0] = in->win_mtime[0] = in->win_atime[0];
++		in->win_ctime[1] = in->win_mtime[1] = in->win_atime[1];
++
++#else
++		in->yst_atime = in->yst_mtime = in->yst_ctime = Y_CURRENT_TIME;
++
++		in->yst_rdev = rdev;
++		in->yst_uid = uid;
++		in->yst_gid = gid;
++#endif
++		in->nDataChunks = 0;
++
++		yaffs_SetObjectName(in, name);
++		in->dirty = 1;
++
++		yaffs_AddObjectToDirectory(parent, in);
++
++		in->myDev = parent->myDev;
++
++		switch (type) {
++		case YAFFS_OBJECT_TYPE_SYMLINK:
++			in->variant.symLinkVariant.alias = str;
++			break;
++		case YAFFS_OBJECT_TYPE_HARDLINK:
++			in->variant.hardLinkVariant.equivalentObject =
++			    equivalentObject;
++			in->variant.hardLinkVariant.equivalentObjectId =
++			    equivalentObject->objectId;
++			list_add(&in->hardLinks, &equivalentObject->hardLinks);
++			break;
++		case YAFFS_OBJECT_TYPE_FILE:	
++		case YAFFS_OBJECT_TYPE_DIRECTORY:
++		case YAFFS_OBJECT_TYPE_SPECIAL:
++		case YAFFS_OBJECT_TYPE_UNKNOWN:
++			/* do nothing */
++			break;
++		}
++
++		if (yaffs_UpdateObjectHeader(in, name, 0, 0, 0) < 0) {
++			/* Could not create the object header, fail the creation */
++			yaffs_DestroyObject(in);
++			in = NULL;
++		}
++
++	}
++
++	return in;
++}
++
++yaffs_Object *yaffs_MknodFile(yaffs_Object * parent, const YCHAR * name,
++			      __u32 mode, __u32 uid, __u32 gid)
++{
++	return yaffs_MknodObject(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
++				 uid, gid, NULL, NULL, 0);
++}
++
++yaffs_Object *yaffs_MknodDirectory(yaffs_Object * parent, const YCHAR * name,
++				   __u32 mode, __u32 uid, __u32 gid)
++{
++	return yaffs_MknodObject(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
++				 mode, uid, gid, NULL, NULL, 0);
++}
++
++yaffs_Object *yaffs_MknodSpecial(yaffs_Object * parent, const YCHAR * name,
++				 __u32 mode, __u32 uid, __u32 gid, __u32 rdev)
++{
++	return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
++				 uid, gid, NULL, NULL, rdev);
++}
++
++yaffs_Object *yaffs_MknodSymLink(yaffs_Object * parent, const YCHAR * name,
++				 __u32 mode, __u32 uid, __u32 gid,
++				 const YCHAR * alias)
++{
++	return yaffs_MknodObject(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
++				 uid, gid, NULL, alias, 0);
++}
++
++/* yaffs_Link returns the object id of the equivalent object.*/
++yaffs_Object *yaffs_Link(yaffs_Object * parent, const YCHAR * name,
++			 yaffs_Object * equivalentObject)
++{
++	/* Get the real object in case we were fed a hard link as an equivalent object */
++	equivalentObject = yaffs_GetEquivalentObject(equivalentObject);
++
++	if (yaffs_MknodObject
++	    (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0,
++	     equivalentObject, NULL, 0)) {
++		return equivalentObject;
++	} else {
++		return NULL;
++	}
++
++}
++
++static int yaffs_ChangeObjectName(yaffs_Object * obj, yaffs_Object * newDir,
++				  const YCHAR * newName, int force, int shadows)
++{
++	int unlinkOp;
++	int deleteOp;
++
++	yaffs_Object *existingTarget;
++
++	if (newDir == NULL) {
++		newDir = obj->parent;	/* use the old directory */
++	}
++
++	if (newDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("tragedy: yaffs_ChangeObjectName: newDir is not a directory"
++		    TENDSTR)));
++		YBUG();
++	}
++	
++	/* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
++	if (obj->myDev->isYaffs2) {
++		unlinkOp = (newDir == obj->myDev->unlinkedDir);
++	} else {
++		unlinkOp = (newDir == obj->myDev->unlinkedDir
++			    && obj->variantType == YAFFS_OBJECT_TYPE_FILE);
++	}
++
++	deleteOp = (newDir == obj->myDev->deletedDir);
++
++	existingTarget = yaffs_FindObjectByName(newDir, newName);
++
++	/* If the object is a file going into the unlinked directory,
++	 *   then it is OK to just stuff it in since duplicate names are allowed.
++	 *   Otherwise only proceed if the new name does not exist and we're putting
++	 *   it into a directory.
++	 */
++	if ((unlinkOp ||
++	     deleteOp ||
++	     force ||
++	     (shadows > 0) ||
++	     !existingTarget) &&
++	    newDir->variantType == YAFFS_OBJECT_TYPE_DIRECTORY) {
++		yaffs_SetObjectName(obj, newName);
++		obj->dirty = 1;
++
++		yaffs_AddObjectToDirectory(newDir, obj);
++
++		if (unlinkOp)
++			obj->unlinked = 1;
++
++		/* If it is a deletion then we mark it as a shrink for gc purposes. */
++		if (yaffs_UpdateObjectHeader(obj, newName, 0, deleteOp, shadows)>= 0)
++			return YAFFS_OK;
++	}
++
++	return YAFFS_FAIL;
++}
++
++int yaffs_RenameObject(yaffs_Object * oldDir, const YCHAR * oldName,
++		       yaffs_Object * newDir, const YCHAR * newName)
++{
++	yaffs_Object *obj;
++	yaffs_Object *existingTarget;
++	int force = 0;
++
++#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
++	/* Special case for case insensitive systems (eg. WinCE).
++	 * While look-up is case insensitive, the name isn't.
++	 * Therefore we might want to change x.txt to X.txt
++	*/
++	if (oldDir == newDir && yaffs_strcmp(oldName, newName) == 0) {
++		force = 1;
++	}
++#endif
++
++	obj = yaffs_FindObjectByName(oldDir, oldName);
++	/* Check whether the new name is too long. */
++	if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK &&
++	    yaffs_strlen(newName) > YAFFS_MAX_ALIAS_LENGTH)
++	  /* ENAMETOOLONG */
++	  return YAFFS_FAIL;
++	else if (obj->variantType != YAFFS_OBJECT_TYPE_SYMLINK &&
++		 yaffs_strlen(newName) > YAFFS_MAX_NAME_LENGTH)
++	  /* ENAMETOOLONG */
++	  return YAFFS_FAIL;
++
++	if (obj && obj->renameAllowed) {
++
++		/* Now do the handling for an existing target, if there is one */
++
++		existingTarget = yaffs_FindObjectByName(newDir, newName);
++		if (existingTarget &&
++		    existingTarget->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
++		    !list_empty(&existingTarget->variant.directoryVariant.children)) {
++			/* There is a target that is a non-empty directory, so we fail */
++			return YAFFS_FAIL;	/* EEXIST or ENOTEMPTY */
++		} else if (existingTarget && existingTarget != obj) {
++			/* Nuke the target first, using shadowing, 
++			 * but only if it isn't the same object
++			 */
++			yaffs_ChangeObjectName(obj, newDir, newName, force,
++					       existingTarget->objectId);
++			yaffs_UnlinkObject(existingTarget);
++		}
++
++		return yaffs_ChangeObjectName(obj, newDir, newName, 1, 0);
++	}
++	return YAFFS_FAIL;
++}
++
++/*------------------------- Block Management and Page Allocation ----------------*/
++
++static int yaffs_InitialiseBlocks(yaffs_Device * dev)
++{
++	int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
++	
++	dev->blockInfo = NULL;
++	dev->chunkBits = NULL;
++	
++	dev->allocationBlock = -1;	/* force it to get a new one */
++
++	/* If the first allocation strategy fails, try the alternate one */
++	dev->blockInfo = YMALLOC(nBlocks * sizeof(yaffs_BlockInfo));
++	if(!dev->blockInfo){
++		dev->blockInfo = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockInfo));
++		dev->blockInfoAlt = 1;
++	}
++	else
++		dev->blockInfoAlt = 0;
++		
++	if(dev->blockInfo){
++	
++		/* Set up dynamic blockinfo stuff. */
++		dev->chunkBitmapStride = (dev->nChunksPerBlock + 7) / 8; /* round up bytes */
++		dev->chunkBits = YMALLOC(dev->chunkBitmapStride * nBlocks);
++		if(!dev->chunkBits){
++			dev->chunkBits = YMALLOC_ALT(dev->chunkBitmapStride * nBlocks);
++			dev->chunkBitsAlt = 1;
++		}
++		else
++			dev->chunkBitsAlt = 0;
++	}
++	
++	if (dev->blockInfo && dev->chunkBits) {
++		memset(dev->blockInfo, 0, nBlocks * sizeof(yaffs_BlockInfo));
++		memset(dev->chunkBits, 0, dev->chunkBitmapStride * nBlocks);
++		return YAFFS_OK;
++	}
++
++	return YAFFS_FAIL;
++
++}
++
++static void yaffs_DeinitialiseBlocks(yaffs_Device * dev)
++{
++	if(dev->blockInfoAlt && dev->blockInfo)
++		YFREE_ALT(dev->blockInfo);
++	else if(dev->blockInfo)
++		YFREE(dev->blockInfo);
++
++	dev->blockInfoAlt = 0;
++
++	dev->blockInfo = NULL;
++	
++	if(dev->chunkBitsAlt && dev->chunkBits)
++		YFREE_ALT(dev->chunkBits);
++	else if(dev->chunkBits)
++		YFREE(dev->chunkBits);
++	dev->chunkBitsAlt = 0;
++	dev->chunkBits = NULL;
++}
++
++static int yaffs_BlockNotDisqualifiedFromGC(yaffs_Device * dev,
++					    yaffs_BlockInfo * bi)
++{
++	int i;
++	__u32 seq;
++	yaffs_BlockInfo *b;
++
++	if (!dev->isYaffs2)
++		return 1;	/* disqualification only applies to yaffs2. */
++
++	if (!bi->hasShrinkHeader)
++		return 1;	/* can gc */
++
++	/* Find the oldest dirty sequence number if we don't know it and save it
++	 * so we don't have to keep recomputing it.
++	 */
++	if (!dev->oldestDirtySequence) {
++		seq = dev->sequenceNumber;
++
++		for (i = dev->internalStartBlock; i <= dev->internalEndBlock;
++		     i++) {
++			b = yaffs_GetBlockInfo(dev, i);
++			if (b->blockState == YAFFS_BLOCK_STATE_FULL &&
++			    (b->pagesInUse - b->softDeletions) <
++			    dev->nChunksPerBlock && b->sequenceNumber < seq) {
++				seq = b->sequenceNumber;
++			}
++		}
++		dev->oldestDirtySequence = seq;
++	}
++
++	/* Can't do gc of this block if there are any blocks older than this one that have
++	 * discarded pages.
++	 */
++	return (bi->sequenceNumber <= dev->oldestDirtySequence);
++
++}
++
++/* FindBlockForGarbageCollection is used to select the dirtiest block (or close enough)
++ * for garbage collection.
++ */
++
++static int yaffs_FindBlockForGarbageCollection(yaffs_Device * dev,
++					       int aggressive)
++{
++
++	int b = dev->currentDirtyChecker;
++
++	int i;
++	int iterations;
++	int dirtiest = -1;
++	int pagesInUse = 0;
++	int prioritised=0;
++	yaffs_BlockInfo *bi;
++	int pendingPrioritisedExist = 0;
++	
++	/* First let's see if we need to grab a prioritised block */
++	if(dev->hasPendingPrioritisedGCs){
++		for(i = dev->internalStartBlock; i < dev->internalEndBlock && !prioritised; i++){
++
++			bi = yaffs_GetBlockInfo(dev, i);
++			//yaffs_VerifyBlock(dev,bi,i);
++			
++			if(bi->gcPrioritise) {
++				pendingPrioritisedExist = 1;
++				if(bi->blockState == YAFFS_BLOCK_STATE_FULL &&
++				   yaffs_BlockNotDisqualifiedFromGC(dev, bi)){
++					pagesInUse = (bi->pagesInUse - bi->softDeletions);
++					dirtiest = i;
++					prioritised = 1;
++					aggressive = 1; /* Fool the non-aggressive skip logic below */
++				}
++			}
++		}
++		
++		if(!pendingPrioritisedExist) /* None found, so we can clear this */
++			dev->hasPendingPrioritisedGCs = 0;
++	}
++
++	/* If we're doing aggressive GC then we are happy to take a less-dirty block and
++	 * search harder.
++	 * Otherwise (we're doing a leisurely gc) we only bother to do this if the
++	 * block has only a few pages in use.
++	 */
++
++	dev->nonAggressiveSkip--;
++
++	if (!aggressive && (dev->nonAggressiveSkip > 0)) {
++		return -1;
++	}
++
++	if(!prioritised)
++		pagesInUse =
++	    		(aggressive) ? dev->nChunksPerBlock : YAFFS_PASSIVE_GC_CHUNKS + 1;
++
++	if (aggressive) {
++		iterations =
++		    dev->internalEndBlock - dev->internalStartBlock + 1;
++	} else {
++		iterations =
++		    dev->internalEndBlock - dev->internalStartBlock + 1;
++		iterations = iterations / 16;
++		if (iterations > 200) {
++			iterations = 200;
++		}
++	}
++
++	for (i = 0; i <= iterations && pagesInUse > 0 && !prioritised; i++) {
++		b++;
++		if (b < dev->internalStartBlock || b > dev->internalEndBlock) {
++			b = dev->internalStartBlock;
++		}
++
++		if (b < dev->internalStartBlock || b > dev->internalEndBlock) {
++			T(YAFFS_TRACE_ERROR,
++			  (TSTR("**>> Block %d is not valid" TENDSTR), b));
++			YBUG();
++		}
++
++		bi = yaffs_GetBlockInfo(dev, b);
++
++#if 0
++		if (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT) {
++			dirtiest = b;
++			pagesInUse = 0;
++		}
++		else 
++#endif
++
++		if (bi->blockState == YAFFS_BLOCK_STATE_FULL &&
++		       (bi->pagesInUse - bi->softDeletions) < pagesInUse &&
++		        yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
++			dirtiest = b;
++			pagesInUse = (bi->pagesInUse - bi->softDeletions);
++		}
++	}
++
++	dev->currentDirtyChecker = b;
++
++	if (dirtiest > 0) {
++		T(YAFFS_TRACE_GC,
++		  (TSTR("GC Selected block %d with %d free, prioritised:%d" TENDSTR), dirtiest,
++		   dev->nChunksPerBlock - pagesInUse,prioritised));
++	}
++
++	dev->oldestDirtySequence = 0;
++
++	if (dirtiest > 0) {
++		dev->nonAggressiveSkip = 4;
++	}
++
++	return dirtiest;
++}
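++
++/* Illustrative contrast of the two passes above (nChunksPerBlock == 64 is
++ * assumed only for the numbers): an aggressive pass scans every block,
++ * starting with a threshold of 64 pages in use, and keeps the dirtiest
++ * qualifying FULL block; a passive pass scans at most 1/16 of the blocks
++ * (capped at 200 iterations) and only accepts blocks with fewer than
++ * YAFFS_PASSIVE_GC_CHUNKS + 1 pages in use.
++ */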
++
++static void yaffs_BlockBecameDirty(yaffs_Device * dev, int blockNo)
++{
++	yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockNo);
++
++	int erasedOk = 0;
++
++	/* If the block is still healthy erase it and mark as clean.
++	 * If the block has had a data failure, then retire it.
++	 */
++	 
++	T(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
++		(TSTR("yaffs_BlockBecameDirty block %d state %d %s"TENDSTR),
++		blockNo, bi->blockState, (bi->needsRetiring) ? "needs retiring" : ""));
++		
++	bi->blockState = YAFFS_BLOCK_STATE_DIRTY;
++
++	if (!bi->needsRetiring) {
++		yaffs_InvalidateCheckpoint(dev);
++		erasedOk = yaffs_EraseBlockInNAND(dev, blockNo);
++		if (!erasedOk) {
++			dev->nErasureFailures++;
++			T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++			  (TSTR("**>> Erasure failed %d" TENDSTR), blockNo));
++		}
++	}
++
++	if (erasedOk && 
++	    ((yaffs_traceMask & YAFFS_TRACE_ERASE) || !yaffs_SkipVerification(dev))) {
++		int i;
++		for (i = 0; i < dev->nChunksPerBlock; i++) {
++			if (!yaffs_CheckChunkErased
++			    (dev, blockNo * dev->nChunksPerBlock + i)) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   (">>Block %d erasure supposedly OK, but chunk %d not erased"
++				    TENDSTR), blockNo, i));
++			}
++		}
++	}
++
++	if (erasedOk) {
++		/* Clean it up... */
++		bi->blockState = YAFFS_BLOCK_STATE_EMPTY;
++		dev->nErasedBlocks++;
++		bi->pagesInUse = 0;
++		bi->softDeletions = 0;
++		bi->hasShrinkHeader = 0;
++		bi->skipErasedCheck = 1;  /* This is clean, so no need to check */
++		bi->gcPrioritise = 0;
++		yaffs_ClearChunkBits(dev, blockNo);
++
++		T(YAFFS_TRACE_ERASE,
++		  (TSTR("Erased block %d" TENDSTR), blockNo));
++	} else {
++		dev->nFreeChunks -= dev->nChunksPerBlock;	/* We lost a block of free space */
++
++		yaffs_RetireBlock(dev, blockNo);
++		T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++		  (TSTR("**>> Block %d retired" TENDSTR), blockNo));
++	}
++}
++
++static int yaffs_FindBlockForAllocation(yaffs_Device * dev)
++{
++	int i;
++
++	yaffs_BlockInfo *bi;
++
++	if (dev->nErasedBlocks < 1) {
++		/* Hoosterman we've got a problem.
++		 * Can't get space to gc
++		 */
++		T(YAFFS_TRACE_ERROR,
++		  (TSTR("yaffs tragedy: no more erased blocks" TENDSTR)));
++
++		return -1;
++	}
++	
++	/* Find an empty block. */
++
++	for (i = dev->internalStartBlock; i <= dev->internalEndBlock; i++) {
++		dev->allocationBlockFinder++;
++		if (dev->allocationBlockFinder < dev->internalStartBlock
++		    || dev->allocationBlockFinder > dev->internalEndBlock) {
++			dev->allocationBlockFinder = dev->internalStartBlock;
++		}
++
++		bi = yaffs_GetBlockInfo(dev, dev->allocationBlockFinder);
++
++		if (bi->blockState == YAFFS_BLOCK_STATE_EMPTY) {
++			bi->blockState = YAFFS_BLOCK_STATE_ALLOCATING;
++			dev->sequenceNumber++;
++			bi->sequenceNumber = dev->sequenceNumber;
++			dev->nErasedBlocks--;
++			T(YAFFS_TRACE_ALLOCATE,
++			  (TSTR("Allocated block %d, seq  %d, %d left" TENDSTR),
++			   dev->allocationBlockFinder, dev->sequenceNumber,
++			   dev->nErasedBlocks));
++			return dev->allocationBlockFinder;
++		}
++	}
++
++	T(YAFFS_TRACE_ALWAYS,
++	  (TSTR
++	   ("yaffs tragedy: no more erased blocks, but there should have been %d"
++	    TENDSTR), dev->nErasedBlocks));
++
++	return -1;
++}
++
++
++// Check if there's space to allocate...
++// Think: do we need to make this the same as yaffs_GetFreeChunks()?
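++// For illustration (numbers assumed): with nReservedBlocks = 5, a checkpoint
++// shortfall of 2 blocks and 64 chunks per block, the reserve works out to
++// (5 + 2) * 64 = 448 chunks, so allocation is refused once nFreeChunks <= 448.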
++static int yaffs_CheckSpaceForAllocation(yaffs_Device * dev)
++{
++	int reservedChunks;
++	int reservedBlocks = dev->nReservedBlocks;
++	int checkpointBlocks;
++	
++	checkpointBlocks =  dev->nCheckpointReservedBlocks - dev->blocksInCheckpoint;
++	if(checkpointBlocks < 0)
++		checkpointBlocks = 0;
++	
++	reservedChunks = ((reservedBlocks + checkpointBlocks) * dev->nChunksPerBlock);
++	
++	return (dev->nFreeChunks > reservedChunks);
++}
++
++static int yaffs_AllocateChunk(yaffs_Device * dev, int useReserve, yaffs_BlockInfo **blockUsedPtr)
++{
++	int retVal;
++	yaffs_BlockInfo *bi;
++
++	if (dev->allocationBlock < 0) {
++		/* Get next block to allocate off */
++		dev->allocationBlock = yaffs_FindBlockForAllocation(dev);
++		dev->allocationPage = 0;
++	}
++
++	if (!useReserve && !yaffs_CheckSpaceForAllocation(dev)) {
++		/* Not enough space to allocate unless we're allowed to use the reserve. */
++		return -1;
++	}
++
++	if (dev->nErasedBlocks < dev->nReservedBlocks
++	    && dev->allocationPage == 0) {
++		T(YAFFS_TRACE_ALLOCATE, (TSTR("Allocating reserve" TENDSTR)));
++	}
++
++	/* Next page please.... */
++	if (dev->allocationBlock >= 0) {
++		bi = yaffs_GetBlockInfo(dev, dev->allocationBlock);
++
++		retVal = (dev->allocationBlock * dev->nChunksPerBlock) +
++		    dev->allocationPage;
++		bi->pagesInUse++;
++		yaffs_SetChunkBit(dev, dev->allocationBlock,
++				  dev->allocationPage);
++
++		dev->allocationPage++;
++
++		dev->nFreeChunks--;
++
++		/* If the block is full set the state to full */
++		if (dev->allocationPage >= dev->nChunksPerBlock) {
++			bi->blockState = YAFFS_BLOCK_STATE_FULL;
++			dev->allocationBlock = -1;
++		}
++
++		if(blockUsedPtr)
++			*blockUsedPtr = bi;
++			
++		return retVal;
++	}
++	
++	T(YAFFS_TRACE_ERROR,
++	  (TSTR("!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" TENDSTR)));
++
++	return -1;
++}
++
++static int yaffs_GetErasedChunks(yaffs_Device * dev)
++{
++	int n;
++
++	n = dev->nErasedBlocks * dev->nChunksPerBlock;
++
++	if (dev->allocationBlock > 0) {
++		n += (dev->nChunksPerBlock - dev->allocationPage);
++	}
++
++	return n;
++
++}
++
++static int yaffs_GarbageCollectBlock(yaffs_Device * dev, int block)
++{
++	int oldChunk;
++	int newChunk;
++	int chunkInBlock;
++	int markNAND;
++	int retVal = YAFFS_OK;
++	int cleanups = 0;
++	int i;
++	int isCheckpointBlock;
++	int matchingChunk;
++
++	int chunksBefore = yaffs_GetErasedChunks(dev);
++	int chunksAfter;
++
++	yaffs_ExtendedTags tags;
++
++	yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, block);
++
++	yaffs_Object *object;
++
++	isCheckpointBlock = (bi->blockState == YAFFS_BLOCK_STATE_CHECKPOINT);
++	
++	bi->blockState = YAFFS_BLOCK_STATE_COLLECTING;
++
++	T(YAFFS_TRACE_TRACING,
++	  (TSTR("Collecting block %d, in use %d, shrink %d, " TENDSTR), block,
++	   bi->pagesInUse, bi->hasShrinkHeader));
++
++	/*yaffs_VerifyFreeChunks(dev); */
++
++	bi->hasShrinkHeader = 0;	/* clear the flag so that the block can erase */
++
++	/* Take off the number of soft deleted entries because
++	 * they're going to get really deleted during GC.
++	 */
++	dev->nFreeChunks -= bi->softDeletions;
++
++	dev->isDoingGC = 1;
++
++	if (isCheckpointBlock ||
++	    !yaffs_StillSomeChunkBits(dev, block)) {
++		T(YAFFS_TRACE_TRACING,
++		  (TSTR
++		   ("Collecting block %d that has no chunks in use" TENDSTR),
++		   block));
++		yaffs_BlockBecameDirty(dev, block);
++	} else {
++
++		__u8 *buffer = yaffs_GetTempBuffer(dev, __LINE__);
++		
++		yaffs_VerifyBlock(dev,bi,block);
++
++		for (chunkInBlock = 0, oldChunk = block * dev->nChunksPerBlock;
++		     chunkInBlock < dev->nChunksPerBlock
++		     && yaffs_StillSomeChunkBits(dev, block);
++		     chunkInBlock++, oldChunk++) {
++			if (yaffs_CheckChunkBit(dev, block, chunkInBlock)) {
++
++				/* This page is in use and might need to be copied off */
++
++				markNAND = 1;
++
++				yaffs_InitialiseTags(&tags);
++
++				yaffs_ReadChunkWithTagsFromNAND(dev, oldChunk,
++								buffer, &tags);
++
++				object =
++				    yaffs_FindObjectByNumber(dev,
++							     tags.objectId);
++
++				T(YAFFS_TRACE_GC_DETAIL,
++				  (TSTR
++				   ("Collecting page %d, %d %d %d " TENDSTR),
++				   chunkInBlock, tags.objectId, tags.chunkId,
++				   tags.byteCount));
++				   
++				if(object && !yaffs_SkipVerification(dev)){
++					if(tags.chunkId == 0)
++						matchingChunk = object->chunkId;
++					else if(object->softDeleted)
++						matchingChunk = oldChunk; /* Defeat the test */
++					else
++						matchingChunk = yaffs_FindChunkInFile(object,tags.chunkId,NULL);
++					
++					if(oldChunk != matchingChunk)
++						T(YAFFS_TRACE_ERROR,
++						  (TSTR("gc: page in gc mismatch: %d %d %d %d"TENDSTR),
++						  oldChunk,matchingChunk,tags.objectId, tags.chunkId));
++						
++				}
++
++				if (!object) {
++					T(YAFFS_TRACE_ERROR,
++					  (TSTR
++					   ("page %d in gc has no object: %d %d %d "
++					    TENDSTR), oldChunk,
++					    tags.objectId, tags.chunkId, tags.byteCount));
++				}
++
++				if (object && object->deleted
++				    && tags.chunkId != 0) {
++					/* Data chunk in a deleted file: throw it away.
++					 * It's a soft-deleted data chunk, so there is
++					 * no need to copy it; just forget about it and
++					 * fix up the object.
++					 */
++
++					object->nDataChunks--;
++
++					if (object->nDataChunks <= 0) {
++						/* remember to clean up the object */
++						dev->gcCleanupList[cleanups] =
++						    tags.objectId;
++						cleanups++;
++					}
++					markNAND = 0;
++				} else if (0
++					   /* Todo object && object->deleted && object->nDataChunks == 0 */
++					   ) {
++					/* Deleted object header with no data chunks.
++					 * Can be discarded and the file deleted.
++					 */
++					object->chunkId = 0;
++					yaffs_FreeTnode(object->myDev,
++							object->variant.
++							fileVariant.top);
++					object->variant.fileVariant.top = NULL;
++					yaffs_DoGenericObjectDeletion(object);
++
++				} else if (object) {
++					/* It's either a data chunk in a live file or
++					 * an ObjectHeader, so we're interested in it.
++					 * NB Need to keep the ObjectHeaders of deleted files
++					 * until the whole file has been deleted off
++					 */
++					tags.serialNumber++;
++
++					dev->nGCCopies++;
++
++					if (tags.chunkId == 0) {
++						/* It is an ObjectHeader.
++						 * We need to nuke the shrinkHeader flag first:
++						 * we no longer want it since its work is done,
++						 * and if it is left in place it will mess up scanning.
++						 * Also, clear out any shadowing stuff.
++						 */
++
++						yaffs_ObjectHeader *oh;
++						oh = (yaffs_ObjectHeader *)buffer;
++						oh->isShrink = 0;
++						oh->shadowsObject = -1;
++						tags.extraShadows = 0;
++						tags.extraIsShrinkHeader = 0;
++						
++						yaffs_VerifyObjectHeader(object,oh,&tags,1);
++					}
++
++					newChunk =
++					    yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &tags, 1);
++
++					if (newChunk < 0) {
++						retVal = YAFFS_FAIL;
++					} else {
++
++						/* Ok, now fix up the Tnodes etc. */
++
++						if (tags.chunkId == 0) {
++							/* It's a header */
++							object->chunkId =  newChunk;
++							object->serial =   tags.serialNumber;
++						} else {
++							/* It's a data chunk */
++							yaffs_PutChunkIntoFile
++							    (object,
++							     tags.chunkId,
++							     newChunk, 0);
++						}
++					}
++				}
++
++				yaffs_DeleteChunk(dev, oldChunk, markNAND, __LINE__);
++
++			}
++		}
++
++		yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
++
++
++		/* Do any required cleanups */
++		for (i = 0; i < cleanups; i++) {
++			/* Time to delete the file too */
++			object =
++			    yaffs_FindObjectByNumber(dev,
++						     dev->gcCleanupList[i]);
++			if (object) {
++				yaffs_FreeTnode(dev,
++						object->variant.fileVariant.
++						top);
++				object->variant.fileVariant.top = NULL;
++				T(YAFFS_TRACE_GC,
++				  (TSTR
++				   ("yaffs: About to finally delete object %d"
++				    TENDSTR), object->objectId));
++				yaffs_DoGenericObjectDeletion(object);
++				object->myDev->nDeletedFiles--;
++			}
++
++		}
++
++	}
++
++	yaffs_VerifyCollectedBlock(dev,bi,block);
++	  
++	if (chunksBefore >= (chunksAfter = yaffs_GetErasedChunks(dev))) {
++		T(YAFFS_TRACE_GC,
++		  (TSTR
++		   ("gc did not increase free chunks before %d after %d"
++		    TENDSTR), chunksBefore, chunksAfter));
++	}
++
++	dev->isDoingGC = 0;
++
++	return YAFFS_OK;
++}
++
++/* New garbage collector
++ * If we're very low on erased blocks then we do aggressive garbage collection
++ * otherwise we do "leisurely" garbage collection.
++ * Aggressive gc looks further (whole array) and will accept less dirty blocks.
++ * Passive gc only inspects smaller areas and will only accept more dirty blocks.
++ *
++ * The idea is to help clear out space in a more spread-out manner.
++ * Dunno if it really does anything useful.
++ */
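++/* For illustration (numbers assumed): with nReservedBlocks = 5, 10 checkpoint
++ * blocks reserved and 4 already holding checkpoint data, the adjust term is 6,
++ * so GC turns aggressive once fewer than 5 + 6 + 2 = 13 erased blocks remain.
++ */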
++static int yaffs_CheckGarbageCollection(yaffs_Device * dev)
++{
++	int block;
++	int aggressive;
++	int gcOk = YAFFS_OK;
++	int maxTries = 0;
++	
++	int checkpointBlockAdjust;
++
++	if (dev->isDoingGC) {
++		/* Bail out so we don't get recursive gc */
++		return YAFFS_OK;
++	}
++	
++	/* This loop should pass the first time.
++	 * We'll only see looping here if the erase of the collected block fails.
++	 */
++
++	do {
++		maxTries++;
++		
++		checkpointBlockAdjust = (dev->nCheckpointReservedBlocks - dev->blocksInCheckpoint);
++		if(checkpointBlockAdjust < 0)
++			checkpointBlockAdjust = 0;
++
++		if (dev->nErasedBlocks < (dev->nReservedBlocks + checkpointBlockAdjust + 2)) {
++			/* We need a block soon...*/
++			aggressive = 1;
++		} else {
++			/* We're in no hurry */
++			aggressive = 0;
++		}
++
++		block = yaffs_FindBlockForGarbageCollection(dev, aggressive);
++
++		if (block > 0) {
++			dev->garbageCollections++;
++			if (!aggressive) {
++				dev->passiveGarbageCollections++;
++			}
++
++			T(YAFFS_TRACE_GC,
++			  (TSTR
++			   ("yaffs: GC erasedBlocks %d aggressive %d" TENDSTR),
++			   dev->nErasedBlocks, aggressive));
++
++			gcOk = yaffs_GarbageCollectBlock(dev, block);
++		}
++
++		if (dev->nErasedBlocks < (dev->nReservedBlocks) && block > 0) {
++			T(YAFFS_TRACE_GC,
++			  (TSTR
++			   ("yaffs: GC !!!no reclaim!!! erasedBlocks %d after try %d block %d"
++			    TENDSTR), dev->nErasedBlocks, maxTries, block));
++		}
++	} while ((dev->nErasedBlocks < dev->nReservedBlocks) && (block > 0)
++		 && (maxTries < 2));
++
++	return aggressive ? gcOk : YAFFS_OK;
++}
++
++/*-------------------------  TAGS --------------------------------*/
++
++static int yaffs_TagsMatch(const yaffs_ExtendedTags * tags, int objectId,
++			   int chunkInObject)
++{
++	return (tags->chunkId == chunkInObject &&
++		tags->objectId == objectId && !tags->chunkDeleted) ? 1 : 0;
++
++}
++
++
++/*-------------------- Data file manipulation -----------------*/
++
++static int yaffs_FindChunkInFile(yaffs_Object * in, int chunkInInode,
++				 yaffs_ExtendedTags * tags)
++{
++	/* Get the Tnode, then get the level 0 chunk offset */
++	yaffs_Tnode *tn;
++	int theChunk = -1;
++	yaffs_ExtendedTags localTags;
++	int retVal = -1;
++
++	yaffs_Device *dev = in->myDev;
++
++	if (!tags) {
++		/* Passed a NULL, so use our own tags space */
++		tags = &localTags;
++	}
++
++	tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode);
++
++	if (tn) {
++		theChunk = yaffs_GetChunkGroupBase(dev,tn,chunkInInode);
++
++		retVal =
++		    yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId,
++					   chunkInInode);
++	}
++	return retVal;
++}
++
++static int yaffs_FindAndDeleteChunkInFile(yaffs_Object * in, int chunkInInode,
++					  yaffs_ExtendedTags * tags)
++{
++	/* Get the Tnode, then get the level 0 chunk offset */
++	yaffs_Tnode *tn;
++	int theChunk = -1;
++	yaffs_ExtendedTags localTags;
++
++	yaffs_Device *dev = in->myDev;
++	int retVal = -1;
++
++	if (!tags) {
++		/* Passed a NULL, so use our own tags space */
++		tags = &localTags;
++	}
++
++	tn = yaffs_FindLevel0Tnode(dev, &in->variant.fileVariant, chunkInInode);
++
++	if (tn) {
++
++		theChunk = yaffs_GetChunkGroupBase(dev,tn,chunkInInode);
++
++		retVal =
++		    yaffs_FindChunkInGroup(dev, theChunk, tags, in->objectId,
++					   chunkInInode);
++
++		/* Delete the entry in the filestructure (if found) */
++		if (retVal != -1) {
++			yaffs_PutLevel0Tnode(dev,tn,chunkInInode,0);
++		}
++	} else {
++		/*T(("No level 0 found for %d\n", chunkInInode)); */
++	}
++
++	if (retVal == -1) {
++		/* T(("Could not find %d to delete\n",chunkInInode)); */
++	}
++	return retVal;
++}
++
++#ifdef YAFFS_PARANOID
++
++static int yaffs_CheckFileSanity(yaffs_Object * in)
++{
++	int chunk;
++	int nChunks;
++	int fSize;
++	int failed = 0;
++	int objId;
++	yaffs_Tnode *tn;
++	yaffs_Tags localTags;
++	yaffs_Tags *tags = &localTags;
++	int theChunk;
++	int chunkDeleted;
++	yaffs_Device *dev = in->myDev;	/* used by the chunk bitmap helpers below */
++
++	if (in->variantType != YAFFS_OBJECT_TYPE_FILE) {
++		/* T(("Object not a file\n")); */
++		return YAFFS_FAIL;
++	}
++
++	objId = in->objectId;
++	fSize = in->variant.fileVariant.fileSize;
++	nChunks =
++	    (fSize + in->myDev->nDataBytesPerChunk - 1) / in->myDev->nDataBytesPerChunk;
++
++	for (chunk = 1; chunk <= nChunks; chunk++) {
++		tn = yaffs_FindLevel0Tnode(in->myDev, &in->variant.fileVariant,
++					   chunk);
++
++		if (tn) {
++
++			theChunk = yaffs_GetChunkGroupBase(dev,tn,chunk);
++
++			if (yaffs_CheckChunkBits
++			    (dev, theChunk / dev->nChunksPerBlock,
++			     theChunk % dev->nChunksPerBlock)) {
++
++				yaffs_ReadChunkTagsFromNAND(in->myDev, theChunk,
++							    tags,
++							    &chunkDeleted);
++				if (yaffs_TagsMatch
++				    (tags, in->objectId, chunk, chunkDeleted)) {
++					/* found it; */
++
++				}
++			} else {
++
++				failed = 1;
++			}
++
++		} else {
++			/* T(("No level 0 found for %d\n", chunk)); */
++		}
++	}
++
++	return failed ? YAFFS_FAIL : YAFFS_OK;
++}
++
++#endif
++
++static int yaffs_PutChunkIntoFile(yaffs_Object * in, int chunkInInode,
++				  int chunkInNAND, int inScan)
++{
++	/* NB inScan is zero unless scanning. 
++	 * For forward scanning, inScan is > 0; 
++	 * for backward scanning inScan is < 0
++	 */
++	 
++	yaffs_Tnode *tn;
++	yaffs_Device *dev = in->myDev;
++	int existingChunk;
++	yaffs_ExtendedTags existingTags;
++	yaffs_ExtendedTags newTags;
++	unsigned existingSerial, newSerial;
++
++	if (in->variantType != YAFFS_OBJECT_TYPE_FILE) {
++		/* Just ignore an attempt at putting a chunk into a non-file during scanning
++		 * If it is not during Scanning then something went wrong!
++		 */
++		if (!inScan) {
++			T(YAFFS_TRACE_ERROR,
++			  (TSTR
++			   ("yaffs tragedy: attempt to put data chunk into a non-file"
++			    TENDSTR)));
++			YBUG();
++		}
++
++		yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
++		return YAFFS_OK;
++	}
++
++	tn = yaffs_AddOrFindLevel0Tnode(dev, 
++					&in->variant.fileVariant,
++					chunkInInode,
++					NULL);
++	if (!tn) {
++		return YAFFS_FAIL;
++	}
++
++	existingChunk = yaffs_GetChunkGroupBase(dev,tn,chunkInInode);
++
++	if (inScan != 0) {
++		/* If we're scanning then we need to test for duplicates
++		 * NB This does not need to be efficient since it should only ever
++		 * happen when the power fails during a write, in which case only one
++		 * chunk should ever be affected.
++		 *
++		 * Correction for YAFFS2: This could happen quite a lot and we need to think about efficiency! TODO
++		 * Update: For backward scanning we don't need to re-read tags so this is quite cheap.
++		 */
++
++		if (existingChunk != 0) {
++			/* NB Right now the existing chunk will not be the real chunkId if the device is >= 32MB;
++			 *    thus we have to do a FindChunkInFile to get the real chunk id.
++			 *
++			 * We have a duplicate, so now we need to decide which one to use:
++			 *
++			 * Backwards scanning YAFFS2: The old one is what we use, dump the new one.
++			 * Forward scanning YAFFS2: The new one is what we use, dump the old one.
++			 * YAFFS1: Get both sets of tags and compare serial numbers.
++			 */
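++			/* In short: the new chunk wins only when forward scanning and
++			 * either this is YAFFS2, no existing chunk was found, or the
++			 * serial number advanced by one (mod 4); otherwise the existing
++			 * chunk is kept and the new one is discarded.
++			 */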
++
++			if (inScan > 0) {
++				/* Only do this for forward scanning */
++				yaffs_ReadChunkWithTagsFromNAND(dev,
++								chunkInNAND,
++								NULL, &newTags);
++
++				/* Do a proper find */
++				existingChunk =
++				    yaffs_FindChunkInFile(in, chunkInInode,
++							  &existingTags);
++			}
++
++			if (existingChunk <= 0) {
++				/*Hoosterman - how did this happen? */
++
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("yaffs tragedy: existing chunk < 0 in scan"
++				    TENDSTR)));
++
++			}
++
++			/* NB The deleted flags should be false, otherwise the chunks will 
++			 * not be loaded during a scan
++			 */
++
++			newSerial = newTags.serialNumber;
++			existingSerial = existingTags.serialNumber;
++
++			if ((inScan > 0) &&
++			    (in->myDev->isYaffs2 ||
++			     existingChunk <= 0 ||
++			     ((existingSerial + 1) & 3) == newSerial)) {
++				/* Forward scanning.                            
++				 * Use new
++				 * Delete the old one and drop through to update the tnode
++				 */
++				yaffs_DeleteChunk(dev, existingChunk, 1,
++						  __LINE__);
++			} else {
++				/* Backward scanning or we want to use the existing one
++				 * Use existing.
++				 * Delete the new one and return early so that the tnode isn't changed
++				 */
++				yaffs_DeleteChunk(dev, chunkInNAND, 1,
++						  __LINE__);
++				return YAFFS_OK;
++			}
++		}
++
++	}
++
++	if (existingChunk == 0) {
++		in->nDataChunks++;
++	}
++
++	yaffs_PutLevel0Tnode(dev,tn,chunkInInode,chunkInNAND);
++
++	return YAFFS_OK;
++}
++
++static int yaffs_ReadChunkDataFromObject(yaffs_Object * in, int chunkInInode,
++					 __u8 * buffer)
++{
++	int chunkInNAND = yaffs_FindChunkInFile(in, chunkInInode, NULL);
++
++	if (chunkInNAND >= 0) {
++		return yaffs_ReadChunkWithTagsFromNAND(in->myDev, chunkInNAND,
++						       buffer,NULL);
++	} else {
++		T(YAFFS_TRACE_NANDACCESS,
++		  (TSTR("Chunk %d not found, zero instead" TENDSTR),
++		   chunkInNAND));
++		/* get sane (zero) data if you read a hole */
++		memset(buffer, 0, in->myDev->nDataBytesPerChunk);	
++		return 0;
++	}
++
++}
++
++void yaffs_DeleteChunk(yaffs_Device * dev, int chunkId, int markNAND, int lyn)
++{
++	int block;
++	int page;
++	yaffs_ExtendedTags tags;
++	yaffs_BlockInfo *bi;
++
++	if (chunkId <= 0)
++		return;
++		
++
++	dev->nDeletions++;
++	block = chunkId / dev->nChunksPerBlock;
++	page = chunkId % dev->nChunksPerBlock;
++
++
++	if(!yaffs_CheckChunkBit(dev,block,page))
++		T(YAFFS_TRACE_VERIFY,
++		 	(TSTR("Deleting invalid chunk %d"TENDSTR),
++		 	 chunkId));
++
++	bi = yaffs_GetBlockInfo(dev, block);
++
++	T(YAFFS_TRACE_DELETION,
++	  (TSTR("line %d delete of chunk %d" TENDSTR), lyn, chunkId));
++
++	if (markNAND &&
++	    bi->blockState != YAFFS_BLOCK_STATE_COLLECTING && !dev->isYaffs2) {
++
++		yaffs_InitialiseTags(&tags);
++
++		tags.chunkDeleted = 1;
++
++		yaffs_WriteChunkWithTagsToNAND(dev, chunkId, NULL, &tags);
++		yaffs_HandleUpdateChunk(dev, chunkId, &tags);
++	} else {
++		dev->nUnmarkedDeletions++;
++	}
++
++	/* Pull out of the management area.
++	 * If the whole block became dirty, this will kick off an erasure.
++	 */
++	if (bi->blockState == YAFFS_BLOCK_STATE_ALLOCATING ||
++	    bi->blockState == YAFFS_BLOCK_STATE_FULL ||
++	    bi->blockState == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
++	    bi->blockState == YAFFS_BLOCK_STATE_COLLECTING) {
++		dev->nFreeChunks++;
++
++		yaffs_ClearChunkBit(dev, block, page);
++
++		bi->pagesInUse--;
++
++		if (bi->pagesInUse == 0 &&
++		    !bi->hasShrinkHeader &&
++		    bi->blockState != YAFFS_BLOCK_STATE_ALLOCATING &&
++		    bi->blockState != YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++			yaffs_BlockBecameDirty(dev, block);
++		}
++
++	} else {
++		/* T(("Bad news deleting chunk %d\n",chunkId)); */
++	}
++
++}
++
++static int yaffs_WriteChunkDataToObject(yaffs_Object * in, int chunkInInode,
++					const __u8 * buffer, int nBytes,
++					int useReserve)
++{
++	/* Find the old chunk. Need to do this to get the serial number.
++	 * Write the new one and patch it into the tree.
++	 * Invalidate the old tags.
++	 */
++
++	int prevChunkId;
++	yaffs_ExtendedTags prevTags;
++
++	int newChunkId;
++	yaffs_ExtendedTags newTags;
++
++	yaffs_Device *dev = in->myDev;
++
++	yaffs_CheckGarbageCollection(dev);
++
++	/* Get the previous chunk at this location in the file if it exists */
++	prevChunkId = yaffs_FindChunkInFile(in, chunkInInode, &prevTags);
++
++	/* Set up new tags */
++	yaffs_InitialiseTags(&newTags);
++
++	newTags.chunkId = chunkInInode;
++	newTags.objectId = in->objectId;
++	newTags.serialNumber =
++	    (prevChunkId >= 0) ? prevTags.serialNumber + 1 : 1;
++	newTags.byteCount = nBytes;
++
++	newChunkId =
++	    yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags,
++					      useReserve);
++
++	if (newChunkId >= 0) {
++		yaffs_PutChunkIntoFile(in, chunkInInode, newChunkId, 0);
++
++		if (prevChunkId >= 0) {
++			yaffs_DeleteChunk(dev, prevChunkId, 1, __LINE__);
++
++		}
++
++		yaffs_CheckFileSanity(in);
++	}
++	return newChunkId;
++
++}
++
++/* UpdateObjectHeader updates the header on NAND for an object.
++ * If name is not NULL, then that new name is used.
++ */
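++/* The sequence is: read the old header (if any) to preserve the name, build the
++ * new header and tags in a temp buffer, write them as a new chunk, then delete
++ * the old header chunk and, for a shrink, flag the block holding the new header.
++ */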
++int yaffs_UpdateObjectHeader(yaffs_Object * in, const YCHAR * name, int force,
++			     int isShrink, int shadows)
++{
++
++	yaffs_BlockInfo *bi;
++
++	yaffs_Device *dev = in->myDev;
++
++	int prevChunkId;
++	int retVal = 0;
++	int result = 0;
++
++	int newChunkId;
++	yaffs_ExtendedTags newTags;
++	yaffs_ExtendedTags oldTags;
++
++	__u8 *buffer = NULL;
++	YCHAR oldName[YAFFS_MAX_NAME_LENGTH + 1];
++
++	yaffs_ObjectHeader *oh = NULL;
++	
++	yaffs_strcpy(oldName,"silly old name");
++
++	if (!in->fake || force) {
++
++		yaffs_CheckGarbageCollection(dev);
++		yaffs_CheckObjectDetailsLoaded(in);
++
++		buffer = yaffs_GetTempBuffer(in->myDev, __LINE__);
++		oh = (yaffs_ObjectHeader *) buffer;
++
++		prevChunkId = in->chunkId;
++
++		if (prevChunkId >= 0) {
++			result = yaffs_ReadChunkWithTagsFromNAND(dev, prevChunkId,
++							buffer, &oldTags);
++			
++			yaffs_VerifyObjectHeader(in,oh,&oldTags,0);
++										
++			memcpy(oldName, oh->name, sizeof(oh->name));
++		}
++
++		memset(buffer, 0xFF, dev->nDataBytesPerChunk);
++
++		oh->type = in->variantType;
++		oh->yst_mode = in->yst_mode;
++		oh->shadowsObject = shadows;
++
++#ifdef CONFIG_YAFFS_WINCE
++		oh->win_atime[0] = in->win_atime[0];
++		oh->win_ctime[0] = in->win_ctime[0];
++		oh->win_mtime[0] = in->win_mtime[0];
++		oh->win_atime[1] = in->win_atime[1];
++		oh->win_ctime[1] = in->win_ctime[1];
++		oh->win_mtime[1] = in->win_mtime[1];
++#else
++		oh->yst_uid = in->yst_uid;
++		oh->yst_gid = in->yst_gid;
++		oh->yst_atime = in->yst_atime;
++		oh->yst_mtime = in->yst_mtime;
++		oh->yst_ctime = in->yst_ctime;
++		oh->yst_rdev = in->yst_rdev;
++#endif
++		if (in->parent) {
++			oh->parentObjectId = in->parent->objectId;
++		} else {
++			oh->parentObjectId = 0;
++		}
++
++		if (name && *name) {
++			memset(oh->name, 0, sizeof(oh->name));
++			yaffs_strncpy(oh->name, name, YAFFS_MAX_NAME_LENGTH);
++		} else if (prevChunkId>=0) {
++			memcpy(oh->name, oldName, sizeof(oh->name));
++		} else {
++			memset(oh->name, 0, sizeof(oh->name));
++		}
++
++		oh->isShrink = isShrink;
++
++		switch (in->variantType) {
++		case YAFFS_OBJECT_TYPE_UNKNOWN:
++			/* Should not happen */
++			break;
++		case YAFFS_OBJECT_TYPE_FILE:
++			oh->fileSize =
++			    (oh->parentObjectId == YAFFS_OBJECTID_DELETED
++			     || oh->parentObjectId ==
++			     YAFFS_OBJECTID_UNLINKED) ? 0 : in->variant.
++			    fileVariant.fileSize;
++			break;
++		case YAFFS_OBJECT_TYPE_HARDLINK:
++			oh->equivalentObjectId =
++			    in->variant.hardLinkVariant.equivalentObjectId;
++			break;
++		case YAFFS_OBJECT_TYPE_SPECIAL:
++			/* Do nothing */
++			break;
++		case YAFFS_OBJECT_TYPE_DIRECTORY:
++			/* Do nothing */
++			break;
++		case YAFFS_OBJECT_TYPE_SYMLINK:
++			yaffs_strncpy(oh->alias,
++				      in->variant.symLinkVariant.alias,
++				      YAFFS_MAX_ALIAS_LENGTH);
++			oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
++			break;
++		}
++
++		/* Tags */
++		yaffs_InitialiseTags(&newTags);
++		in->serial++;
++		newTags.chunkId = 0;
++		newTags.objectId = in->objectId;
++		newTags.serialNumber = in->serial;
++
++		/* Add extra info for file header */
++
++		newTags.extraHeaderInfoAvailable = 1;
++		newTags.extraParentObjectId = oh->parentObjectId;
++		newTags.extraFileLength = oh->fileSize;
++		newTags.extraIsShrinkHeader = oh->isShrink;
++		newTags.extraEquivalentObjectId = oh->equivalentObjectId;
++		newTags.extraShadows = (oh->shadowsObject > 0) ? 1 : 0;
++		newTags.extraObjectType = in->variantType;
++
++		yaffs_VerifyObjectHeader(in,oh,&newTags,1);
++
++		/* Create new chunk in NAND */
++		newChunkId =
++		    yaffs_WriteNewChunkWithTagsToNAND(dev, buffer, &newTags,
++						      (prevChunkId >= 0) ? 1 : 0);
++
++		if (newChunkId >= 0) {
++
++			in->chunkId = newChunkId;
++
++			if (prevChunkId >= 0) {
++				yaffs_DeleteChunk(dev, prevChunkId, 1,
++						  __LINE__);
++			}
++
++			if(!yaffs_ObjectHasCachedWriteData(in))
++				in->dirty = 0;
++
++			/* If this was a shrink, then mark the block that the chunk lives on */
++			if (isShrink) {
++				bi = yaffs_GetBlockInfo(in->myDev,
++							newChunkId /in->myDev->	nChunksPerBlock);
++				bi->hasShrinkHeader = 1;
++			}
++
++		}
++
++		retVal = newChunkId;
++
++	}
++
++	if (buffer)
++		yaffs_ReleaseTempBuffer(dev, buffer, __LINE__);
++
++	return retVal;
++}
++
++/*------------------------ Short Operations Cache ----------------------------------------
++ *   In many situations where there is no high level buffering (eg WinCE) a lot of
++ *   reads might be short sequential reads, and a lot of writes may be short
++ *   sequential writes, eg. scanning/writing a jpeg file.
++ *   In these cases, a short read/write cache can provide a huge performance benefit
++ *   with dumb-as-a-rock code.
++ *   In Linux, the page cache provides read buffering and the short op cache provides
++ *   write buffering.
++ *
++ *   There are a limited number (~10) of cache chunks per device so that we don't
++ *   need a very intelligent search.
++ */
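++/* For illustration (sizes assumed): an application doing 512-byte sequential
++ * writes to a device with 2048-byte data chunks hits the same cache entry four
++ * times and costs one chunk write per 2048 bytes instead of four partial
++ * read-modify-write cycles.
++ */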
++
++static int yaffs_ObjectHasCachedWriteData(yaffs_Object *obj)
++{
++	yaffs_Device *dev = obj->myDev;
++	int i;
++	yaffs_ChunkCache *cache;
++	int nCaches = obj->myDev->nShortOpCaches;
++	
++	for(i = 0; i < nCaches; i++){
++		cache = &dev->srCache[i];
++		if (cache->object == obj &&
++		    cache->dirty)
++			return 1;
++	}
++	
++	return 0;
++}
++
++
++static void yaffs_FlushFilesChunkCache(yaffs_Object * obj)
++{
++	yaffs_Device *dev = obj->myDev;
++	int lowest = -99;	/* Stop compiler whining. */
++	int i;
++	yaffs_ChunkCache *cache;
++	int chunkWritten = 0;
++	int nCaches = obj->myDev->nShortOpCaches;
++
++	if (nCaches > 0) {
++		do {
++			cache = NULL;
++
++			/* Find the dirty cache for this object with the lowest chunk id. */
++			for (i = 0; i < nCaches; i++) {
++				if (dev->srCache[i].object == obj &&
++				    dev->srCache[i].dirty) {
++					if (!cache
++					    || dev->srCache[i].chunkId <
++					    lowest) {
++						cache = &dev->srCache[i];
++						lowest = cache->chunkId;
++					}
++				}
++			}
++
++			if (cache && !cache->locked) {
++				/* Write it out and free it up */
++
++				chunkWritten =
++				    yaffs_WriteChunkDataToObject(cache->object,
++								 cache->chunkId,
++								 cache->data,
++								 cache->nBytes,
++								 1);
++				cache->dirty = 0;
++				cache->object = NULL;
++			}
++
++		} while (cache && chunkWritten > 0);
++
++		if (cache) {
++			/* Hoosterman, disk full while writing cache out. */
++			T(YAFFS_TRACE_ERROR,
++			  (TSTR("yaffs tragedy: no space during cache write" TENDSTR)));
++
++		}
++	}
++
++}
++
++/*yaffs_FlushEntireDeviceCache(dev)
++ *
++ *
++ */
++
++void yaffs_FlushEntireDeviceCache(yaffs_Device *dev)
++{
++	yaffs_Object *obj;
++	int nCaches = dev->nShortOpCaches;
++	int i;
++	
++	/* Find a dirty object in the cache and flush it...
++	 * until there are no further dirty objects.
++	 */
++	do {
++		obj = NULL;
++		for( i = 0; i < nCaches && !obj; i++) {
++			if (dev->srCache[i].object &&
++			    dev->srCache[i].dirty)
++				obj = dev->srCache[i].object;
++			    
++		}
++		if(obj)
++			yaffs_FlushFilesChunkCache(obj);
++			
++	} while(obj);
++	
++}
++
++
++/* Grab us a cache chunk for use.
++ * First look for an empty one. 
++ * Then look for the least recently used non-dirty one.
++ * Then look for the least recently used dirty one...., flush and look again.
++ */
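++/* NB Only empty entries are handed out here; the early return below means the
++ * least-recently-used scan is not reached, and the flush-then-retry fallback
++ * lives in yaffs_GrabChunkCache() instead.
++ */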
++static yaffs_ChunkCache *yaffs_GrabChunkCacheWorker(yaffs_Device * dev)
++{
++	int i;
++	int usage;
++	int theOne;
++
++	if (dev->nShortOpCaches > 0) {
++		for (i = 0; i < dev->nShortOpCaches; i++) {
++			if (!dev->srCache[i].object) 
++				return &dev->srCache[i];
++		}
++
++		return NULL;
++
++		theOne = -1;
++		usage = 0;	/* just to stop the compiler grizzling */
++
++		for (i = 0; i < dev->nShortOpCaches; i++) {
++			if (!dev->srCache[i].dirty &&
++			    ((dev->srCache[i].lastUse < usage && theOne >= 0) ||
++			     theOne < 0)) {
++				usage = dev->srCache[i].lastUse;
++				theOne = i;
++			}
++		}
++
++
++		return theOne >= 0 ? &dev->srCache[theOne] : NULL;
++	} else {
++		return NULL;
++	}
++
++}
++
++static yaffs_ChunkCache *yaffs_GrabChunkCache(yaffs_Device * dev)
++{
++	yaffs_ChunkCache *cache;
++	yaffs_Object *theObj;
++	int usage;
++	int i;
++	int pushout;
++
++	if (dev->nShortOpCaches > 0) {
++		/* Try find a non-dirty one... */
++
++		cache = yaffs_GrabChunkCacheWorker(dev);
++
++		if (!cache) {
++			/* They were all dirty: find the least recently used object, flush
++			 * its cache, then look again.
++			 * NB what's here is not very accurate: we actually flush the object
++			 * that owns the least recently used page.
++			 */
++
++			/* With locking we can't assume we can use entry zero */
++
++			theObj = NULL;
++			usage = -1;
++			cache = NULL;
++			pushout = -1;
++
++			for (i = 0; i < dev->nShortOpCaches; i++) {
++				if (dev->srCache[i].object &&
++				    !dev->srCache[i].locked &&
++				    (dev->srCache[i].lastUse < usage || !cache))
++				{
++					usage = dev->srCache[i].lastUse;
++					theObj = dev->srCache[i].object;
++					cache = &dev->srCache[i];
++					pushout = i;
++				}
++			}
++
++			if (!cache || cache->dirty) {
++				/* Flush and try again */
++				yaffs_FlushFilesChunkCache(theObj);
++				cache = yaffs_GrabChunkCacheWorker(dev);
++			}
++
++		}
++		return cache;
++	} else
++		return NULL;
++
++}
++
++/* Find a cached chunk */
++static yaffs_ChunkCache *yaffs_FindChunkCache(const yaffs_Object * obj,
++					      int chunkId)
++{
++	yaffs_Device *dev = obj->myDev;
++	int i;
++	if (dev->nShortOpCaches > 0) {
++		for (i = 0; i < dev->nShortOpCaches; i++) {
++			if (dev->srCache[i].object == obj &&
++			    dev->srCache[i].chunkId == chunkId) {
++				dev->cacheHits++;
++
++				return &dev->srCache[i];
++			}
++		}
++	}
++	return NULL;
++}
++
++/* Mark the chunk for the least recently used algorithm */
++static void yaffs_UseChunkCache(yaffs_Device * dev, yaffs_ChunkCache * cache,
++				int isAWrite)
++{
++
++	if (dev->nShortOpCaches > 0) {
++		if (dev->srLastUse < 0 || dev->srLastUse > 100000000) {
++			/* Reset the cache usages */
++			int i;
++			for (i = 1; i < dev->nShortOpCaches; i++) {
++				dev->srCache[i].lastUse = 0;
++			}
++			dev->srLastUse = 0;
++		}
++
++		dev->srLastUse++;
++
++		cache->lastUse = dev->srLastUse;
++
++		if (isAWrite) {
++			cache->dirty = 1;
++		}
++	}
++}
++
++/* Invalidate a single cache page.
++ * Do this when a whole page gets written,
++ * ie the short cache for this page is no longer valid.
++ */
++static void yaffs_InvalidateChunkCache(yaffs_Object * object, int chunkId)
++{
++	if (object->myDev->nShortOpCaches > 0) {
++		yaffs_ChunkCache *cache = yaffs_FindChunkCache(object, chunkId);
++
++		if (cache) {
++			cache->object = NULL;
++		}
++	}
++}
++
++/* Invalidate all the cache pages associated with this object
++ * Do this whenever the file is deleted or resized.
++ */
++static void yaffs_InvalidateWholeChunkCache(yaffs_Object * in)
++{
++	int i;
++	yaffs_Device *dev = in->myDev;
++
++	if (dev->nShortOpCaches > 0) {
++		/* Invalidate it. */
++		for (i = 0; i < dev->nShortOpCaches; i++) {
++			if (dev->srCache[i].object == in) {
++				dev->srCache[i].object = NULL;
++			}
++		}
++	}
++}
++
++/*--------------------- Checkpointing --------------------*/
++
++
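++/* The checkpoint stream is written and read back in the same order:
++ *   validity marker (head), device record, block info, chunk bits,
++ *   one record per object (with tnodes for files), an end-of-list object,
++ *   validity marker (tail) and finally a checksum.
++ */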
++static int yaffs_WriteCheckpointValidityMarker(yaffs_Device *dev,int head)
++{
++	yaffs_CheckpointValidity cp;
++	
++	memset(&cp,0,sizeof(cp));
++	
++	cp.structType = sizeof(cp);
++	cp.magic = YAFFS_MAGIC;
++	cp.version = YAFFS_CHECKPOINT_VERSION;
++	cp.head = (head) ? 1 : 0;
++	
++	return (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp))?
++		1 : 0;
++}
++
++static int yaffs_ReadCheckpointValidityMarker(yaffs_Device *dev, int head)
++{
++	yaffs_CheckpointValidity cp;
++	int ok;
++	
++	ok = (yaffs_CheckpointRead(dev,&cp,sizeof(cp)) == sizeof(cp));
++	
++	if(ok)
++		ok = (cp.structType == sizeof(cp)) &&
++		     (cp.magic == YAFFS_MAGIC) &&
++		     (cp.version == YAFFS_CHECKPOINT_VERSION) &&
++		     (cp.head == ((head) ? 1 : 0));
++	return ok ? 1 : 0;
++}
++
++static void yaffs_DeviceToCheckpointDevice(yaffs_CheckpointDevice *cp, 
++					   yaffs_Device *dev)
++{
++	cp->nErasedBlocks = dev->nErasedBlocks;
++	cp->allocationBlock = dev->allocationBlock;
++	cp->allocationPage = dev->allocationPage;
++	cp->nFreeChunks = dev->nFreeChunks;
++	
++	cp->nDeletedFiles = dev->nDeletedFiles;
++	cp->nUnlinkedFiles = dev->nUnlinkedFiles;
++	cp->nBackgroundDeletions = dev->nBackgroundDeletions;
++	cp->sequenceNumber = dev->sequenceNumber;
++	cp->oldestDirtySequence = dev->oldestDirtySequence;
++	
++}
++
++static void yaffs_CheckpointDeviceToDevice(yaffs_Device *dev,
++					   yaffs_CheckpointDevice *cp)
++{
++	dev->nErasedBlocks = cp->nErasedBlocks;
++	dev->allocationBlock = cp->allocationBlock;
++	dev->allocationPage = cp->allocationPage;
++	dev->nFreeChunks = cp->nFreeChunks;
++	
++	dev->nDeletedFiles = cp->nDeletedFiles;
++	dev->nUnlinkedFiles = cp->nUnlinkedFiles;
++	dev->nBackgroundDeletions = cp->nBackgroundDeletions;
++	dev->sequenceNumber = cp->sequenceNumber;
++	dev->oldestDirtySequence = cp->oldestDirtySequence;
++}
++
++
++static int yaffs_WriteCheckpointDevice(yaffs_Device *dev)
++{
++	yaffs_CheckpointDevice cp;
++	__u32 nBytes;
++	__u32 nBlocks = (dev->internalEndBlock - dev->internalStartBlock + 1);
++
++	int ok;
++		
++	/* Write device runtime values*/
++	yaffs_DeviceToCheckpointDevice(&cp,dev);
++	cp.structType = sizeof(cp);
++	
++	ok = (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp));
++	
++	/* Write block info */
++	if(ok) {
++		nBytes = nBlocks * sizeof(yaffs_BlockInfo);
++		ok = (yaffs_CheckpointWrite(dev,dev->blockInfo,nBytes) == nBytes);
++	}
++		
++	/* Write chunk bits */		
++	if(ok) {
++		nBytes = nBlocks * dev->chunkBitmapStride;
++		ok = (yaffs_CheckpointWrite(dev,dev->chunkBits,nBytes) == nBytes);
++	}
++	return	 ok ? 1 : 0;
++
++}
++
++static int yaffs_ReadCheckpointDevice(yaffs_Device *dev)
++{
++	yaffs_CheckpointDevice cp;
++	__u32 nBytes;
++	__u32 nBlocks = (dev->internalEndBlock - dev->internalStartBlock + 1);
++
++	int ok;	
++	
++	ok = (yaffs_CheckpointRead(dev,&cp,sizeof(cp)) == sizeof(cp));
++	if(!ok)
++		return 0;
++		
++	if(cp.structType != sizeof(cp))
++		return 0;
++		
++	
++	yaffs_CheckpointDeviceToDevice(dev,&cp);
++	
++	nBytes = nBlocks * sizeof(yaffs_BlockInfo);
++	
++	ok = (yaffs_CheckpointRead(dev,dev->blockInfo,nBytes) == nBytes);
++	
++	if(!ok)
++		return 0;
++	nBytes = nBlocks * dev->chunkBitmapStride;
++	
++	ok = (yaffs_CheckpointRead(dev,dev->chunkBits,nBytes) == nBytes);
++	
++	return ok ? 1 : 0;
++}
++
++static void yaffs_ObjectToCheckpointObject(yaffs_CheckpointObject *cp,
++					   yaffs_Object *obj)
++{
++
++	cp->objectId = obj->objectId;
++	cp->parentId = (obj->parent) ? obj->parent->objectId : 0;
++	cp->chunkId = obj->chunkId;
++	cp->variantType = obj->variantType;			
++	cp->deleted = obj->deleted;
++	cp->softDeleted = obj->softDeleted;
++	cp->unlinked = obj->unlinked;
++	cp->fake = obj->fake;
++	cp->renameAllowed = obj->renameAllowed;
++	cp->unlinkAllowed = obj->unlinkAllowed;
++	cp->serial = obj->serial;
++	cp->nDataChunks = obj->nDataChunks;
++	
++	if(obj->variantType == YAFFS_OBJECT_TYPE_FILE)
++		cp->fileSizeOrEquivalentObjectId = obj->variant.fileVariant.fileSize;
++	else if(obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
++		cp->fileSizeOrEquivalentObjectId = obj->variant.hardLinkVariant.equivalentObjectId;
++}
++
++static void yaffs_CheckpointObjectToObject( yaffs_Object *obj,yaffs_CheckpointObject *cp)
++{
++
++	yaffs_Object *parent;
++	
++	obj->objectId = cp->objectId;
++	
++	if(cp->parentId)
++		parent = yaffs_FindOrCreateObjectByNumber(
++					obj->myDev,
++					cp->parentId,
++					YAFFS_OBJECT_TYPE_DIRECTORY);
++	else
++		parent = NULL;
++		
++	if(parent)
++		yaffs_AddObjectToDirectory(parent, obj);
++		
++	obj->chunkId = cp->chunkId;
++	obj->variantType = cp->variantType;			
++	obj->deleted = cp->deleted;
++	obj->softDeleted = cp->softDeleted;
++	obj->unlinked = cp->unlinked;
++	obj->fake = cp->fake;
++	obj->renameAllowed = cp->renameAllowed;
++	obj->unlinkAllowed = cp->unlinkAllowed;
++	obj->serial = cp->serial;
++	obj->nDataChunks = cp->nDataChunks;
++	
++	if(obj->variantType == YAFFS_OBJECT_TYPE_FILE)
++		obj->variant.fileVariant.fileSize = cp->fileSizeOrEquivalentObjectId;
++	else if(obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK)
++		obj->variant.hardLinkVariant.equivalentObjectId = cp->fileSizeOrEquivalentObjectId;
++		
++	if(obj->objectId >= YAFFS_NOBJECT_BUCKETS)
++		obj->lazyLoaded = 1;
++}
++
++
++
++static int yaffs_CheckpointTnodeWorker(yaffs_Object * in, yaffs_Tnode * tn,
++				  	__u32 level, int chunkOffset)
++{
++	int i;
++	yaffs_Device *dev = in->myDev;
++	int ok = 1;
++	int nTnodeBytes = (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8;
++
++	if (tn) {
++		if (level > 0) {
++
++			for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++){
++				if (tn->internal[i]) {
++					ok = yaffs_CheckpointTnodeWorker(in,
++							tn->internal[i],
++							level - 1,
++							(chunkOffset<<YAFFS_TNODES_INTERNAL_BITS) + i);
++				}
++			}
++		} else if (level == 0) {
++			__u32 baseOffset = chunkOffset <<  YAFFS_TNODES_LEVEL0_BITS;
++			/* printf("write tnode at %d\n",baseOffset); */
++			ok = (yaffs_CheckpointWrite(dev,&baseOffset,sizeof(baseOffset)) == sizeof(baseOffset));
++			if(ok)
++				ok = (yaffs_CheckpointWrite(dev,tn,nTnodeBytes) == nTnodeBytes);
++		}
++	}
++
++	return ok;
++
++}
++
++static int yaffs_WriteCheckpointTnodes(yaffs_Object *obj)
++{
++	__u32 endMarker = ~0;
++	int ok = 1;
++	
++	if(obj->variantType == YAFFS_OBJECT_TYPE_FILE){
++		ok = yaffs_CheckpointTnodeWorker(obj,
++					    obj->variant.fileVariant.top,
++					    obj->variant.fileVariant.topLevel,
++					    0);
++		if(ok)
++			ok = (yaffs_CheckpointWrite(obj->myDev,&endMarker,sizeof(endMarker)) == 
++				sizeof(endMarker));
++	}
++	
++	return ok ? 1 : 0;
++}
++
++static int yaffs_ReadCheckpointTnodes(yaffs_Object *obj)
++{
++	__u32 baseChunk;
++	int ok = 1;
++	yaffs_Device *dev = obj->myDev;
++	yaffs_FileStructure *fileStructPtr = &obj->variant.fileVariant;
++	yaffs_Tnode *tn;
++	int nread = 0;
++	
++	ok = (yaffs_CheckpointRead(dev,&baseChunk,sizeof(baseChunk)) == sizeof(baseChunk));
++	
++	while(ok && (~baseChunk)){
++		nread++;
++		/* Read level 0 tnode */
++		
++		
++		/* printf("read  tnode at %d\n",baseChunk); */
++		tn = yaffs_GetTnodeRaw(dev);
++		if(tn)
++			ok = (yaffs_CheckpointRead(dev,tn,(dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8) ==
++			      (dev->tnodeWidth * YAFFS_NTNODES_LEVEL0)/8);
++		else
++			ok = 0;
++			
++		if(tn && ok){
++			ok = yaffs_AddOrFindLevel0Tnode(dev,
++					       		fileStructPtr,
++					       		baseChunk,
++					       		tn) ? 1 : 0;
++					       		
++		}
++			
++		if(ok)
++			ok = (yaffs_CheckpointRead(dev,&baseChunk,sizeof(baseChunk)) == sizeof(baseChunk));
++		
++	}
++
++	T(YAFFS_TRACE_CHECKPOINT,(
++		TSTR("Checkpoint read tnodes %d records, last %d. ok %d" TENDSTR),
++		nread,baseChunk,ok));
++
++	return ok ? 1 : 0;	
++}
++ 
++
++static int yaffs_WriteCheckpointObjects(yaffs_Device *dev)
++{
++	yaffs_Object *obj;
++	yaffs_CheckpointObject cp;
++	int i;
++	int ok = 1;
++	struct list_head *lh;
++
++	
++	/* Iterate through the objects in each hash entry,
++	 * dumping them to the checkpointing stream.
++	 */
++	 
++	 for(i = 0; ok &&  i <  YAFFS_NOBJECT_BUCKETS; i++){
++	 	list_for_each(lh, &dev->objectBucket[i].list) {
++			if (lh) {
++				obj = list_entry(lh, yaffs_Object, hashLink);
++				if (!obj->deferedFree) {
++					yaffs_ObjectToCheckpointObject(&cp,obj);
++					cp.structType = sizeof(cp);
++
++					T(YAFFS_TRACE_CHECKPOINT,(
++						TSTR("Checkpoint write object %d parent %d type %d chunk %d obj addr %x" TENDSTR),
++						cp.objectId,cp.parentId,cp.variantType,cp.chunkId,(unsigned) obj));
++						
++					ok = (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp));
++					
++					if(ok && obj->variantType == YAFFS_OBJECT_TYPE_FILE){
++						ok = yaffs_WriteCheckpointTnodes(obj);
++					}
++				}
++			}
++		}
++	 }
++	 
++	 /* Dump end of list */
++	memset(&cp,0xFF,sizeof(yaffs_CheckpointObject));
++	cp.structType = sizeof(cp);
++	
++	if(ok)
++		ok = (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp));
++		
++	return ok ? 1 : 0;
++}
++
++static int yaffs_ReadCheckpointObjects(yaffs_Device *dev)
++{
++	yaffs_Object *obj;
++	yaffs_CheckpointObject cp;
++	int ok = 1;
++	int done = 0;
++	yaffs_Object *hardList = NULL;
++	
++	while(ok && !done) {
++		ok = (yaffs_CheckpointRead(dev,&cp,sizeof(cp)) == sizeof(cp));
++		if(cp.structType != sizeof(cp)) {
++			T(YAFFS_TRACE_CHECKPOINT,(TSTR("struct size %d instead of %d ok %d"TENDSTR),
++				cp.structType,sizeof(cp),ok));
++			ok = 0;
++		}
++			
++		T(YAFFS_TRACE_CHECKPOINT,(TSTR("Checkpoint read object %d parent %d type %d chunk %d " TENDSTR),
++			cp.objectId,cp.parentId,cp.variantType,cp.chunkId));
++			
++		if(ok && cp.objectId == ~0)
++			done = 1;
++		else if(ok){
++			obj = yaffs_FindOrCreateObjectByNumber(dev,cp.objectId, cp.variantType);
++			if(obj) {
++				yaffs_CheckpointObjectToObject(obj,&cp);
++				if(obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
++					ok = yaffs_ReadCheckpointTnodes(obj);
++				} else if(obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
++					obj->hardLinks.next =
++						    (struct list_head *)
++						    hardList;
++					hardList = obj;
++				}
++			   
++			}
++		}
++	}
++	
++	if(ok)
++		yaffs_HardlinkFixup(dev,hardList);
++	
++	return ok ? 1 : 0;
++}
++
++static int yaffs_WriteCheckpointSum(yaffs_Device *dev)
++{
++	__u32 checkpointSum;
++	int ok;
++	
++	yaffs_GetCheckpointSum(dev,&checkpointSum);
++	
++	ok = (yaffs_CheckpointWrite(dev,&checkpointSum,sizeof(checkpointSum)) == sizeof(checkpointSum));
++	
++	if(!ok)
++		return 0;
++	
++	return 1;
++}
++
++static int yaffs_ReadCheckpointSum(yaffs_Device *dev)
++{
++	__u32 checkpointSum0;
++	__u32 checkpointSum1;
++	int ok;
++	
++	yaffs_GetCheckpointSum(dev,&checkpointSum0);
++	
++	ok = (yaffs_CheckpointRead(dev,&checkpointSum1,sizeof(checkpointSum1)) == sizeof(checkpointSum1));
++	
++	if(!ok)
++		return 0;
++		
++	if(checkpointSum0 != checkpointSum1)
++		return 0;
++	
++	return 1;
++}
++
++
++static int yaffs_WriteCheckpointData(yaffs_Device *dev)
++{
++
++	int ok = 1;
++	
++	if(dev->skipCheckpointWrite || !dev->isYaffs2){
++		T(YAFFS_TRACE_CHECKPOINT,(TSTR("skipping checkpoint write" TENDSTR)));
++		ok = 0;
++	}
++		
++	if(ok)
++		ok = yaffs_CheckpointOpen(dev,1);
++	
++	if(ok){
++		T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint validity" TENDSTR)));
++		ok = yaffs_WriteCheckpointValidityMarker(dev,1);
++	}
++	if(ok){
++		T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint device" TENDSTR)));
++		ok = yaffs_WriteCheckpointDevice(dev);
++	}
++	if(ok){
++		T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint objects" TENDSTR)));
++		ok = yaffs_WriteCheckpointObjects(dev);
++	}
++	if(ok){
++		T(YAFFS_TRACE_CHECKPOINT,(TSTR("write checkpoint validity" TENDSTR)));
++		ok = yaffs_WriteCheckpointValidityMarker(dev,0);
++	}
++	
++	if(ok){
++		ok = yaffs_WriteCheckpointSum(dev);
++	}
++	
++	
++	if(!yaffs_CheckpointClose(dev))
++		 ok = 0;
++		 
++	if(ok)
++	    	dev->isCheckpointed = 1;
++	 else 
++	 	dev->isCheckpointed = 0;
++
++	return dev->isCheckpointed;
++}
++
++static int yaffs_ReadCheckpointData(yaffs_Device *dev)
++{
++	int ok = 1;
++	
++	if(dev->skipCheckpointRead || !dev->isYaffs2){
++		T(YAFFS_TRACE_CHECKPOINT,(TSTR("skipping checkpoint read" TENDSTR)));
++		ok = 0;
++	}
++	
++	if(ok)
++		ok = yaffs_CheckpointOpen(dev,0); /* open for read */
++	
++	if(ok){
++		T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint validity" TENDSTR)));	
++		ok = yaffs_ReadCheckpointValidityMarker(dev,1);
++	}
++	if(ok){
++		T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint device" TENDSTR)));
++		ok = yaffs_ReadCheckpointDevice(dev);
++	}
++	if(ok){
++		T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint objects" TENDSTR)));	
++		ok = yaffs_ReadCheckpointObjects(dev);
++	}
++	if(ok){
++		T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint validity" TENDSTR)));
++		ok = yaffs_ReadCheckpointValidityMarker(dev,0);
++	}
++	
++	if(ok){
++		ok = yaffs_ReadCheckpointSum(dev);
++		T(YAFFS_TRACE_CHECKPOINT,(TSTR("read checkpoint checksum %d" TENDSTR),ok));
++	}
++
++	if(!yaffs_CheckpointClose(dev))
++		ok = 0;
++
++	if(ok)
++	    	dev->isCheckpointed = 1;
++	 else 
++	 	dev->isCheckpointed = 0;
++
++	return ok ? 1 : 0;
++
++}
++
++static void yaffs_InvalidateCheckpoint(yaffs_Device *dev)
++{
++	if(dev->isCheckpointed || 
++	   dev->blocksInCheckpoint > 0){
++		dev->isCheckpointed = 0;
++		yaffs_CheckpointInvalidateStream(dev);
++		if(dev->superBlock && dev->markSuperBlockDirty)
++			dev->markSuperBlockDirty(dev->superBlock);
++	}
++}
++
++
++int yaffs_CheckpointSave(yaffs_Device *dev)
++{
++
++	T(YAFFS_TRACE_CHECKPOINT,(TSTR("save entry: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
++
++	yaffs_VerifyObjects(dev);
++	yaffs_VerifyBlocks(dev);
++	yaffs_VerifyFreeChunks(dev);
++
++	if(!dev->isCheckpointed) {
++		yaffs_InvalidateCheckpoint(dev);
++		yaffs_WriteCheckpointData(dev);
++	}
++	
++	T(YAFFS_TRACE_ALWAYS,(TSTR("save exit: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
++
++	return dev->isCheckpointed;
++}
++
++int yaffs_CheckpointRestore(yaffs_Device *dev)
++{
++	int retval;
++	T(YAFFS_TRACE_CHECKPOINT,(TSTR("restore entry: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
++		
++	retval = yaffs_ReadCheckpointData(dev);
++
++	if(dev->isCheckpointed){
++		yaffs_VerifyObjects(dev);
++		yaffs_VerifyBlocks(dev);
++		yaffs_VerifyFreeChunks(dev);
++	}
++
++	T(YAFFS_TRACE_CHECKPOINT,(TSTR("restore exit: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
++	
++	return retval;
++}
++
++/*--------------------- File read/write ------------------------
++ * Read and write have very similar structures.
++ * In general the read/write has three parts to it
++ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
++ * Some complete chunks
++ * An incomplete chunk to end off with
++ *
++ * Curve-balls: the first chunk might also be the last chunk.
++ */
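++/* For illustration (2048 data bytes per chunk assumed): a 5000-byte read starting
++ * at offset 1000 splits into a 1048-byte tail of the first chunk, one whole
++ * 2048-byte chunk, and a 1904-byte head of the final chunk.
++ */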
++
++int yaffs_ReadDataFromFile(yaffs_Object * in, __u8 * buffer, loff_t offset,
++			   int nBytes)
++{
++
++	int chunk;
++	int start;
++	int nToCopy;
++	int n = nBytes;
++	int nDone = 0;
++	yaffs_ChunkCache *cache;
++
++	yaffs_Device *dev;
++
++	dev = in->myDev;
++
++	while (n > 0) {
++		//chunk = offset / dev->nDataBytesPerChunk + 1;
++		//start = offset % dev->nDataBytesPerChunk;
++		yaffs_AddrToChunk(dev,offset,&chunk,&start);
++		chunk++;
++
++		/* OK now check for the curveball where the start and end are in
++		 * the same chunk.      
++		 */
++		if ((start + n) < dev->nDataBytesPerChunk) {
++			nToCopy = n;
++		} else {
++			nToCopy = dev->nDataBytesPerChunk - start;
++		}
++
++		cache = yaffs_FindChunkCache(in, chunk);
++
++		/* If the chunk is already in the cache or it is less than a whole chunk
++		 * then use the cache (if there is caching)
++		 * else bypass the cache.
++		 */
++		if (cache || nToCopy != dev->nDataBytesPerChunk) {
++			if (dev->nShortOpCaches > 0) {
++
++				/* If we can't find the data in the cache, then load it up. */
++
++				if (!cache) {
++					cache = yaffs_GrabChunkCache(in->myDev);
++					cache->object = in;
++					cache->chunkId = chunk;
++					cache->dirty = 0;
++					cache->locked = 0;
++					yaffs_ReadChunkDataFromObject(in, chunk,
++								      cache->
++								      data);
++					cache->nBytes = 0;
++				}
++
++				yaffs_UseChunkCache(dev, cache, 0);
++
++				cache->locked = 1;
++
++#ifdef CONFIG_YAFFS_WINCE
++				yfsd_UnlockYAFFS(TRUE);
++#endif
++				memcpy(buffer, &cache->data[start], nToCopy);
++
++#ifdef CONFIG_YAFFS_WINCE
++				yfsd_LockYAFFS(TRUE);
++#endif
++				cache->locked = 0;
++			} else {
++				/* Read into the local buffer then copy..*/
++
++				__u8 *localBuffer =
++				    yaffs_GetTempBuffer(dev, __LINE__);
++				yaffs_ReadChunkDataFromObject(in, chunk,
++							      localBuffer);
++#ifdef CONFIG_YAFFS_WINCE
++				yfsd_UnlockYAFFS(TRUE);
++#endif
++				memcpy(buffer, &localBuffer[start], nToCopy);
++
++#ifdef CONFIG_YAFFS_WINCE
++				yfsd_LockYAFFS(TRUE);
++#endif
++				yaffs_ReleaseTempBuffer(dev, localBuffer,
++							__LINE__);
++			}
++
++		} else {
++#ifdef CONFIG_YAFFS_WINCE
++			__u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__);
++
++			/* Under WinCE can't do direct transfer. Need to use a local buffer.
++			 * This is because we otherwise screw up WinCE's memory mapper
++			 */
++			yaffs_ReadChunkDataFromObject(in, chunk, localBuffer);
++
++#ifdef CONFIG_YAFFS_WINCE
++			yfsd_UnlockYAFFS(TRUE);
++#endif
++			memcpy(buffer, localBuffer, dev->nDataBytesPerChunk);
++
++#ifdef CONFIG_YAFFS_WINCE
++			yfsd_LockYAFFS(TRUE);
++			yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
++#endif
++
++#else
++			/* A full chunk. Read directly into the supplied buffer. */
++			yaffs_ReadChunkDataFromObject(in, chunk, buffer);
++#endif
++		}
++
++		n -= nToCopy;
++		offset += nToCopy;
++		buffer += nToCopy;
++		nDone += nToCopy;
++
++	}
++
++	return nDone;
++}
++
++int yaffs_WriteDataToFile(yaffs_Object * in, const __u8 * buffer, loff_t offset,
++			  int nBytes, int writeThrough)
++{
++
++	int chunk;
++	int start;
++	int nToCopy;
++	int n = nBytes;
++	int nDone = 0;
++	int nToWriteBack;
++	int startOfWrite = offset;
++	int chunkWritten = 0;
++	int nBytesRead;
++
++	yaffs_Device *dev;
++
++	dev = in->myDev;
++
++	while (n > 0 && chunkWritten >= 0) {
++		//chunk = offset / dev->nDataBytesPerChunk + 1;
++		//start = offset % dev->nDataBytesPerChunk;
++		yaffs_AddrToChunk(dev,offset,&chunk,&start);
++		chunk++;
++
++		/* OK now check for the curveball where the start and end are in
++		 * the same chunk.
++		 */
++
++		if ((start + n) < dev->nDataBytesPerChunk) {
++			nToCopy = n;
++
++			/* Now folks, to calculate how many bytes to write back....
++			 * If we're overwriting and not writing to the end of the file then
++			 * we need to write back as much as was there before.
++			 */
++
++			nBytesRead =
++			    in->variant.fileVariant.fileSize -
++			    ((chunk - 1) * dev->nDataBytesPerChunk);
++
++			if (nBytesRead > dev->nDataBytesPerChunk) {
++				nBytesRead = dev->nDataBytesPerChunk;
++			}
++
++			nToWriteBack =
++			    (nBytesRead >
++			     (start + n)) ? nBytesRead : (start + n);
++
++		} else {
++			nToCopy = dev->nDataBytesPerChunk - start;
++			nToWriteBack = dev->nDataBytesPerChunk;
++		}
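++		/* For illustration (2048 data bytes per chunk assumed): with a 5000-byte
++		 * file, a 100-byte write at offset 4296 lands in chunk 3 at start 200;
++		 * nBytesRead = 5000 - 2*2048 = 904 and start + n = 300 < 904, so 904
++		 * bytes are written back and no existing data is lost.
++		 */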
++
++		if (nToCopy != dev->nDataBytesPerChunk) {
++			/* An incomplete start or end chunk (or maybe both start and end chunk) */
++			if (dev->nShortOpCaches > 0) {
++				yaffs_ChunkCache *cache;
++				/* If we can't find the data in the cache, then load the cache */
++				cache = yaffs_FindChunkCache(in, chunk);
++				
++				if (!cache
++				    && yaffs_CheckSpaceForAllocation(in->
++								     myDev)) {
++					cache = yaffs_GrabChunkCache(in->myDev);
++					cache->object = in;
++					cache->chunkId = chunk;
++					cache->dirty = 0;
++					cache->locked = 0;
++					yaffs_ReadChunkDataFromObject(in, chunk,
++								      cache->
++								      data);
++				}
++				else if(cache && 
++				        !cache->dirty &&
++					!yaffs_CheckSpaceForAllocation(in->myDev)){
++					/* Drop the cache if it was a read cache item and
++					 * no space check has been made for it.
++					 */ 
++					 cache = NULL;
++				}
++
++				if (cache) {
++					yaffs_UseChunkCache(dev, cache, 1);
++					cache->locked = 1;
++#ifdef CONFIG_YAFFS_WINCE
++					yfsd_UnlockYAFFS(TRUE);
++#endif
++
++					memcpy(&cache->data[start], buffer,
++					       nToCopy);
++
++#ifdef CONFIG_YAFFS_WINCE
++					yfsd_LockYAFFS(TRUE);
++#endif
++					cache->locked = 0;
++					cache->nBytes = nToWriteBack;
++
++					if (writeThrough) {
++						chunkWritten =
++						    yaffs_WriteChunkDataToObject
++						    (cache->object,
++						     cache->chunkId,
++						     cache->data, cache->nBytes,
++						     1);
++						cache->dirty = 0;
++					}
++
++				} else {
++					chunkWritten = -1;	/* fail the write */
++				}
++			} else {
++				/* An incomplete start or end chunk (or maybe both start and end chunk).
++				 * Read into the local buffer, copy the new data over, then write back.
++				 */
++
++				__u8 *localBuffer =
++				    yaffs_GetTempBuffer(dev, __LINE__);
++
++				yaffs_ReadChunkDataFromObject(in, chunk,
++							      localBuffer);
++
++#ifdef CONFIG_YAFFS_WINCE
++				yfsd_UnlockYAFFS(TRUE);
++#endif
++
++				memcpy(&localBuffer[start], buffer, nToCopy);
++
++#ifdef CONFIG_YAFFS_WINCE
++				yfsd_LockYAFFS(TRUE);
++#endif
++				chunkWritten =
++				    yaffs_WriteChunkDataToObject(in, chunk,
++								 localBuffer,
++								 nToWriteBack,
++								 0);
++
++				yaffs_ReleaseTempBuffer(dev, localBuffer,
++							__LINE__);
++
++			}
++
++		} else {
++
++#ifdef CONFIG_YAFFS_WINCE
++			/* Under WinCE can't do direct transfer. Need to use a local buffer.
++			 * This is because we otherwise screw up WinCE's memory mapper
++			 */
++			__u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__);
++#ifdef CONFIG_YAFFS_WINCE
++			yfsd_UnlockYAFFS(TRUE);
++#endif
++			memcpy(localBuffer, buffer, dev->nDataBytesPerChunk);
++#ifdef CONFIG_YAFFS_WINCE
++			yfsd_LockYAFFS(TRUE);
++#endif
++			chunkWritten =
++			    yaffs_WriteChunkDataToObject(in, chunk, localBuffer,
++							 dev->nDataBytesPerChunk,
++							 0);
++			yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
++#else
++			/* A full chunk. Write directly from the supplied buffer. */
++			chunkWritten =
++			    yaffs_WriteChunkDataToObject(in, chunk, buffer,
++							 dev->nDataBytesPerChunk,
++							 0);
++#endif
++			/* Since we've overwritten the cached data, we better invalidate it. */
++			yaffs_InvalidateChunkCache(in, chunk);
++		}
++
++		if (chunkWritten >= 0) {
++			n -= nToCopy;
++			offset += nToCopy;
++			buffer += nToCopy;
++			nDone += nToCopy;
++		}
++
++	}
++
++	/* Update file object */
++
++	if ((startOfWrite + nDone) > in->variant.fileVariant.fileSize) {
++		in->variant.fileVariant.fileSize = (startOfWrite + nDone);
++	}
++
++	in->dirty = 1;
++
++	return nDone;
++}
++
++
++/* ---------------------- File resizing stuff ------------------ */
++
++static void yaffs_PruneResizedChunks(yaffs_Object * in, int newSize)
++{
++
++	yaffs_Device *dev = in->myDev;
++	int oldFileSize = in->variant.fileVariant.fileSize;
++
++	int lastDel = 1 + (oldFileSize - 1) / dev->nDataBytesPerChunk;
++
++	int startDel = 1 + (newSize + dev->nDataBytesPerChunk - 1) /
++	    dev->nDataBytesPerChunk;
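++	/* Illustrative example (assuming 2048-byte data chunks): shrinking a
++	 * 10000-byte file to newSize = 3000 gives lastDel = 1 + 9999/2048 = 5 and
++	 * startDel = 1 + 5047/2048 = 3, so file chunks 5, 4 and 3 are deleted
++	 * below (in-file chunk numbering starts at 1).
++	 */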
++	int i;
++	int chunkId;
++
++	/* Delete backwards so that we don't end up with holes if
++	 * power is lost part-way through the operation.
++	 */
++	for (i = lastDel; i >= startDel; i--) {
++		/* NB this could be optimised somewhat,
++		 * eg. could retrieve the tags and write them without
++		 * using yaffs_DeleteChunk
++		 */
++
++		chunkId = yaffs_FindAndDeleteChunkInFile(in, i, NULL);
++		if (chunkId > 0) {
++			if (chunkId <
++			    (dev->internalStartBlock * dev->nChunksPerBlock)
++			    || chunkId >=
++			    ((dev->internalEndBlock +
++			      1) * dev->nChunksPerBlock)) {
++				T(YAFFS_TRACE_ALWAYS,
++				  (TSTR("Found daft chunkId %d for %d" TENDSTR),
++				   chunkId, i));
++			} else {
++				in->nDataChunks--;
++				yaffs_DeleteChunk(dev, chunkId, 1, __LINE__);
++			}
++		}
++	}
++
++}
++
++int yaffs_ResizeFile(yaffs_Object * in, loff_t newSize)
++{
++
++	int oldFileSize = in->variant.fileVariant.fileSize;
++	int newSizeOfPartialChunk;
++	int newFullChunks;
++	
++	yaffs_Device *dev = in->myDev;
++
++	yaffs_AddrToChunk(dev, newSize, &newFullChunks, &newSizeOfPartialChunk);
++
++	yaffs_FlushFilesChunkCache(in);
++	yaffs_InvalidateWholeChunkCache(in);
++
++	yaffs_CheckGarbageCollection(dev);
++
++	if (in->variantType != YAFFS_OBJECT_TYPE_FILE) {
++		return yaffs_GetFileSize(in);
++	}
++
++	if (newSize == oldFileSize) {
++		return oldFileSize;
++	}
++
++	if (newSize < oldFileSize) {
++
++		yaffs_PruneResizedChunks(in, newSize);
++
++		if (newSizeOfPartialChunk != 0) {
++			int lastChunk = 1 + newFullChunks;
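++			/* Illustrative example (assuming 2048-byte data chunks): resizing
++			 * to newSize = 3000 gives newFullChunks = 1 and
++			 * newSizeOfPartialChunk = 952, so lastChunk = 2 is re-read,
++			 * zero-padded from byte 952 onwards and rewritten below.
++			 */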
++			
++			__u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__);
++
++			/* We have to read and rewrite the last chunk with its new size and zero-pad the remainder */
++			yaffs_ReadChunkDataFromObject(in, lastChunk,
++						      localBuffer);
++
++			memset(localBuffer + newSizeOfPartialChunk, 0,
++			       dev->nDataBytesPerChunk - newSizeOfPartialChunk);
++
++			yaffs_WriteChunkDataToObject(in, lastChunk, localBuffer,
++						     newSizeOfPartialChunk, 1);
++
++			yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
++		}
++
++		in->variant.fileVariant.fileSize = newSize;
++
++		yaffs_PruneFileStructure(dev, &in->variant.fileVariant);
++	} else {
++		/* newSize > oldFileSize */
++		in->variant.fileVariant.fileSize = newSize;
++	}
++
++
++	/* Write a new object header to show that we've shrunk the file, if need be.
++	 * Do this only if the file is not in the deleted directories.
++	 */
++	if (in->parent->objectId != YAFFS_OBJECTID_UNLINKED &&
++	    in->parent->objectId != YAFFS_OBJECTID_DELETED) {
++		yaffs_UpdateObjectHeader(in, NULL, 0,
++					 (newSize < oldFileSize) ? 1 : 0, 0);
++	}
++
++	return newSize;
++}
++
++loff_t yaffs_GetFileSize(yaffs_Object * obj)
++{
++	obj = yaffs_GetEquivalentObject(obj);
++
++	switch (obj->variantType) {
++	case YAFFS_OBJECT_TYPE_FILE:
++		return obj->variant.fileVariant.fileSize;
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++		return yaffs_strlen(obj->variant.symLinkVariant.alias);
++	default:
++		return 0;
++	}
++}
++
++
++
++int yaffs_FlushFile(yaffs_Object * in, int updateTime)
++{
++	int retVal;
++	if (in->dirty) {
++		yaffs_FlushFilesChunkCache(in);
++		if (updateTime) {
++#ifdef CONFIG_YAFFS_WINCE
++			yfsd_WinFileTimeNow(in->win_mtime);
++#else
++
++			in->yst_mtime = Y_CURRENT_TIME;
++
++#endif
++		}
++
++		retVal =
++		    (yaffs_UpdateObjectHeader(in, NULL, 0, 0, 0) >=
++		     0) ? YAFFS_OK : YAFFS_FAIL;
++	} else {
++		retVal = YAFFS_OK;
++	}
++
++	return retVal;
++
++}
++
++static int yaffs_DoGenericObjectDeletion(yaffs_Object * in)
++{
++
++	/* First off, invalidate the file's data in the cache, without flushing. */
++	yaffs_InvalidateWholeChunkCache(in);
++
++	if (in->myDev->isYaffs2 && (in->parent != in->myDev->deletedDir)) {
++		/* Move to the deleted directory so we have a record that it was deleted. */
++		yaffs_ChangeObjectName(in, in->myDev->deletedDir,"deleted", 0, 0);
++
++	}
++
++	yaffs_RemoveObjectFromDirectory(in);
++	yaffs_DeleteChunk(in->myDev, in->chunkId, 1, __LINE__);
++	in->chunkId = -1;
++
++	yaffs_FreeObject(in);
++	return YAFFS_OK;
++
++}
++
++/* yaffs_DeleteFile deletes the whole file data
++ * and the inode associated with the file.
++ * It does not delete the links associated with the file.
++ */
++static int yaffs_UnlinkFile(yaffs_Object * in)
++{
++
++	int retVal;
++	int immediateDeletion = 0;
++
++	if (1) {
++#ifdef __KERNEL__
++		if (!in->myInode) {
++			immediateDeletion = 1;
++
++		}
++#else
++		if (in->inUse <= 0) {
++			immediateDeletion = 1;
++
++		}
++#endif
++		if (immediateDeletion) {
++			retVal =
++			    yaffs_ChangeObjectName(in, in->myDev->deletedDir,
++						   "deleted", 0, 0);
++			T(YAFFS_TRACE_TRACING,
++			  (TSTR("yaffs: immediate deletion of file %d" TENDSTR),
++			   in->objectId));
++			in->deleted = 1;
++			in->myDev->nDeletedFiles++;
++			if (0 && in->myDev->isYaffs2) {
++				yaffs_ResizeFile(in, 0);
++			}
++			yaffs_SoftDeleteFile(in);
++		} else {
++			retVal =
++			    yaffs_ChangeObjectName(in, in->myDev->unlinkedDir,
++						   "unlinked", 0, 0);
++		}
++
++	}
++	return retVal;
++}
++
++int yaffs_DeleteFile(yaffs_Object * in)
++{
++	int retVal = YAFFS_OK;
++
++	if (in->nDataChunks > 0) {
++		/* Use soft deletion if there is data in the file */
++		if (!in->unlinked) {
++			retVal = yaffs_UnlinkFile(in);
++		}
++		if (retVal == YAFFS_OK && in->unlinked && !in->deleted) {
++			in->deleted = 1;
++			in->myDev->nDeletedFiles++;
++			yaffs_SoftDeleteFile(in);
++		}
++		return in->deleted ? YAFFS_OK : YAFFS_FAIL;
++	} else {
++		/* The file has no data chunks so we toss it immediately */
++		yaffs_FreeTnode(in->myDev, in->variant.fileVariant.top);
++		in->variant.fileVariant.top = NULL;
++		yaffs_DoGenericObjectDeletion(in);
++
++		return YAFFS_OK;
++	}
++}
++
++static int yaffs_DeleteDirectory(yaffs_Object * in)
++{
++	/* First check that the directory is empty. */
++	if (list_empty(&in->variant.directoryVariant.children)) {
++		return yaffs_DoGenericObjectDeletion(in);
++	}
++
++	return YAFFS_FAIL;
++
++}
++
++static int yaffs_DeleteSymLink(yaffs_Object * in)
++{
++	YFREE(in->variant.symLinkVariant.alias);
++
++	return yaffs_DoGenericObjectDeletion(in);
++}
++
++static int yaffs_DeleteHardLink(yaffs_Object * in)
++{
++	/* remove this hardlink from the list associated with the equivalent
++	 * object
++	 */
++	list_del(&in->hardLinks);
++	return yaffs_DoGenericObjectDeletion(in);
++}
++
++static void yaffs_DestroyObject(yaffs_Object * obj)
++{
++	switch (obj->variantType) {
++	case YAFFS_OBJECT_TYPE_FILE:
++		yaffs_DeleteFile(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_DIRECTORY:
++		yaffs_DeleteDirectory(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++		yaffs_DeleteSymLink(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_HARDLINK:
++		yaffs_DeleteHardLink(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_SPECIAL:
++		yaffs_DoGenericObjectDeletion(obj);
++		break;
++	case YAFFS_OBJECT_TYPE_UNKNOWN:
++		break;		/* should not happen. */
++	}
++}
++
++static int yaffs_UnlinkWorker(yaffs_Object * obj)
++{
++
++	if (obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
++		return yaffs_DeleteHardLink(obj);
++	} else if (!list_empty(&obj->hardLinks)) {
++		/* Curve ball: We're unlinking an object that has a hardlink.
++		 *
++		 * This problem arises because we are not strictly following
++		 * the Linux link/inode model.
++		 *
++		 * We can't really delete the object.
++		 * Instead, we do the following:
++		 * - Select a hardlink.
++		 * - Unhook it from the hard links
++		 * - Unhook it from its parent directory (so that the rename can work)
++		 * - Rename the object to the hardlink's name.
++		 * - Delete the hardlink
++		 */
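++		/* Illustrative example: unlinking "a" while "b" is a hardlink to the
++		 * same object renames the object to "b" under b's parent directory
++		 * and then deletes the hardlink entry, so the data lives on under
++		 * the remaining name.
++		 */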
++
++		yaffs_Object *hl;
++		int retVal;
++		YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
++
++		hl = list_entry(obj->hardLinks.next, yaffs_Object, hardLinks);
++
++		list_del_init(&hl->hardLinks);
++		list_del_init(&hl->siblings);
++
++		yaffs_GetObjectName(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
++
++		retVal = yaffs_ChangeObjectName(obj, hl->parent, name, 0, 0);
++
++		if (retVal == YAFFS_OK) {
++			retVal = yaffs_DoGenericObjectDeletion(hl);
++		}
++		return retVal;
++
++	} else {
++		switch (obj->variantType) {
++		case YAFFS_OBJECT_TYPE_FILE:
++			return yaffs_UnlinkFile(obj);
++			break;
++		case YAFFS_OBJECT_TYPE_DIRECTORY:
++			return yaffs_DeleteDirectory(obj);
++			break;
++		case YAFFS_OBJECT_TYPE_SYMLINK:
++			return yaffs_DeleteSymLink(obj);
++			break;
++		case YAFFS_OBJECT_TYPE_SPECIAL:
++			return yaffs_DoGenericObjectDeletion(obj);
++			break;
++		case YAFFS_OBJECT_TYPE_HARDLINK:
++		case YAFFS_OBJECT_TYPE_UNKNOWN:
++		default:
++			return YAFFS_FAIL;
++		}
++	}
++}
++
++
++static int yaffs_UnlinkObject( yaffs_Object *obj)
++{
++
++	if (obj && obj->unlinkAllowed) {
++		return yaffs_UnlinkWorker(obj);
++	}
++
++	return YAFFS_FAIL;
++
++}
++int yaffs_Unlink(yaffs_Object * dir, const YCHAR * name)
++{
++	yaffs_Object *obj;
++
++	obj = yaffs_FindObjectByName(dir, name);
++	return yaffs_UnlinkObject(obj);
++}
++
++/*----------------------- Initialisation Scanning ---------------------- */
++
++static void yaffs_HandleShadowedObject(yaffs_Device * dev, int objId,
++				       int backwardScanning)
++{
++	yaffs_Object *obj;
++
++	if (!backwardScanning) {
++		/* Handle YAFFS1 forward scanning case
++		 * For YAFFS1 we always do the deletion
++		 */
++
++	} else {
++		/* Handle YAFFS2 case (backward scanning)
++		 * If the shadowed object exists then ignore.
++		 */
++		if (yaffs_FindObjectByNumber(dev, objId)) {
++			return;
++		}
++	}
++
++	/* Let's create it (if it does not exist) assuming it is a file so that it can do shrinking etc.
++	 * We put it in the unlinked dir to be cleaned up after the scanning
++	 */
++	obj =
++	    yaffs_FindOrCreateObjectByNumber(dev, objId,
++					     YAFFS_OBJECT_TYPE_FILE);
++	yaffs_AddObjectToDirectory(dev->unlinkedDir, obj);
++	obj->variant.fileVariant.shrinkSize = 0;
++	obj->valid = 1;		/* So that we don't read any other info for this file */
++
++}
++
++typedef struct {
++	int seq;
++	int block;
++} yaffs_BlockIndex;
++
++
++static void yaffs_HardlinkFixup(yaffs_Device *dev, yaffs_Object *hardList)
++{
++	yaffs_Object *hl;
++	yaffs_Object *in;
++	
++	while (hardList) {
++		hl = hardList;
++		hardList = (yaffs_Object *) (hardList->hardLinks.next);
++
++		in = yaffs_FindObjectByNumber(dev,
++					      hl->variant.hardLinkVariant.
++					      equivalentObjectId);
++
++		if (in) {
++			/* Add the hardlink pointers */
++			hl->variant.hardLinkVariant.equivalentObject = in;
++			list_add(&hl->hardLinks, &in->hardLinks);
++		} else {
++			/* Todo Need to report/handle this better.
++			 * Got a problem... hardlink to a non-existent object
++			 */
++			hl->variant.hardLinkVariant.equivalentObject = NULL;
++			INIT_LIST_HEAD(&hl->hardLinks);
++
++		}
++
++	}
++
++}
++
++
++
++
++
++static int ybicmp(const void *a, const void *b){
++    register int aseq = ((yaffs_BlockIndex *)a)->seq;
++    register int bseq = ((yaffs_BlockIndex *)b)->seq;
++    register int ablock = ((yaffs_BlockIndex *)a)->block;
++    register int bblock = ((yaffs_BlockIndex *)b)->block;
++    if( aseq == bseq )
++        return ablock - bblock;
++    else
++        return aseq - bseq;
++
++}
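++
++/* Illustrative example: (seq, block) entries (7,12), (5,3) and (7,9) compare
++ * under ybicmp so that a sort using it orders them (5,3), (7,9), (7,12):
++ * primarily by sequence number, then by block number within a sequence number.
++ */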
++
++static int yaffs_Scan(yaffs_Device * dev)
++{
++	yaffs_ExtendedTags tags;
++	int blk;
++	int blockIterator;
++	int startIterator;
++	int endIterator;
++	int nBlocksToScan = 0;
++	int result;
++
++	int chunk;
++	int c;
++	int deleted;
++	yaffs_BlockState state;
++	yaffs_Object *hardList = NULL;
++	yaffs_BlockInfo *bi;
++	int sequenceNumber;
++	yaffs_ObjectHeader *oh;
++	yaffs_Object *in;
++	yaffs_Object *parent;
++	int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
++	
++	int alloc_failed = 0;
++	
++
++	__u8 *chunkData;
++
++	yaffs_BlockIndex *blockIndex = NULL;
++
++	if (dev->isYaffs2) {
++		T(YAFFS_TRACE_SCAN,
++		  (TSTR("yaffs_Scan is not for YAFFS2!" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++	
++	//TODO Throw all the yaffs2 stuff out of yaffs_Scan since it is only for the yaffs1 format.
++	
++	T(YAFFS_TRACE_SCAN,
++	  (TSTR("yaffs_Scan starts  intstartblk %d intendblk %d..." TENDSTR),
++	   dev->internalStartBlock, dev->internalEndBlock));
++
++	chunkData = yaffs_GetTempBuffer(dev, __LINE__);
++
++	dev->sequenceNumber = YAFFS_LOWEST_SEQUENCE_NUMBER;
++
++	if (dev->isYaffs2) {
++		blockIndex = YMALLOC(nBlocks * sizeof(yaffs_BlockIndex));
++		if(!blockIndex)
++			return YAFFS_FAIL;
++	}
++
++	/* Scan all the blocks to determine their state */
++	for (blk = dev->internalStartBlock; blk <= dev->internalEndBlock; blk++) {
++		bi = yaffs_GetBlockInfo(dev, blk);
++		yaffs_ClearChunkBits(dev, blk);
++		bi->pagesInUse = 0;
++		bi->softDeletions = 0;
++
++		yaffs_QueryInitialBlockState(dev, blk, &state, &sequenceNumber);
++
++		bi->blockState = state;
++		bi->sequenceNumber = sequenceNumber;
++
++		T(YAFFS_TRACE_SCAN_DEBUG,
++		  (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
++		   state, sequenceNumber));
++
++		if (state == YAFFS_BLOCK_STATE_DEAD) {
++			T(YAFFS_TRACE_BAD_BLOCKS,
++			  (TSTR("block %d is bad" TENDSTR), blk));
++		} else if (state == YAFFS_BLOCK_STATE_EMPTY) {
++			T(YAFFS_TRACE_SCAN_DEBUG,
++			  (TSTR("Block empty " TENDSTR)));
++			dev->nErasedBlocks++;
++			dev->nFreeChunks += dev->nChunksPerBlock;
++		} else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++
++			/* Determine the highest sequence number */
++			if (dev->isYaffs2 &&
++			    sequenceNumber >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
++			    sequenceNumber < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
++
++				blockIndex[nBlocksToScan].seq = sequenceNumber;
++				blockIndex[nBlocksToScan].block = blk;
++
++				nBlocksToScan++;
++
++				if (sequenceNumber >= dev->sequenceNumber) {
++					dev->sequenceNumber = sequenceNumber;
++				}
++			} else if (dev->isYaffs2) {
++				/* TODO: Nasty sequence number! */
++				T(YAFFS_TRACE_SCAN,
++				  (TSTR
++				   ("Block scanning block %d has bad sequence number %d"
++				    TENDSTR), blk, sequenceNumber));
++
++			}
++		}
++	}
++
++	/* Sort the blocks
++	 * Dungy old bubble sort for now...
++	 */
++	if (dev->isYaffs2) {
++		yaffs_BlockIndex temp;
++		int i;
++		int j;
++
++		for (i = 0; i < nBlocksToScan; i++)
++			for (j = i + 1; j < nBlocksToScan; j++)
++				if (blockIndex[i].seq > blockIndex[j].seq) {
++					temp = blockIndex[j];
++					blockIndex[j] = blockIndex[i];
++					blockIndex[i] = temp;
++				}
++	}
++
++	/* Now scan the blocks looking at the data. */
++	if (dev->isYaffs2) {
++		startIterator = 0;
++		endIterator = nBlocksToScan - 1;
++		T(YAFFS_TRACE_SCAN_DEBUG,
++		  (TSTR("%d blocks to be scanned" TENDSTR), nBlocksToScan));
++	} else {
++		startIterator = dev->internalStartBlock;
++		endIterator = dev->internalEndBlock;
++	}
++
++	/* For each block.... */
++	for (blockIterator = startIterator; !alloc_failed && blockIterator <= endIterator;
++	     blockIterator++) {
++
++		if (dev->isYaffs2) {
++			/* get the block to scan in the correct order */
++			blk = blockIndex[blockIterator].block;
++		} else {
++			blk = blockIterator;
++		}
++
++		bi = yaffs_GetBlockInfo(dev, blk);
++		state = bi->blockState;
++
++		deleted = 0;
++
++		/* For each chunk in each block that needs scanning....*/
++		for (c = 0; !alloc_failed && c < dev->nChunksPerBlock &&
++		     state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) {
++			/* Read the tags and decide what to do */
++			chunk = blk * dev->nChunksPerBlock + c;
++
++			result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
++							&tags);
++
++			/* Let's have a good look at this chunk... */
++
++			if (!dev->isYaffs2 && tags.chunkDeleted) {
++				/* YAFFS1 only...
++				 * A deleted chunk
++				 */
++				deleted++;
++				dev->nFreeChunks++;
++				/*T((" %d %d deleted\n",blk,c)); */
++			} else if (!tags.chunkUsed) {
++				/* An unassigned chunk in the block
++				 * This means that either the block is empty or 
++				 * this is the one being allocated from
++				 */
++
++				if (c == 0) {
++					/* We're looking at the first chunk in the block so the block is unused */
++					state = YAFFS_BLOCK_STATE_EMPTY;
++					dev->nErasedBlocks++;
++				} else {
++					/* this is the block being allocated from */
++					T(YAFFS_TRACE_SCAN,
++					  (TSTR
++					   (" Allocating from %d %d" TENDSTR),
++					   blk, c));
++					state = YAFFS_BLOCK_STATE_ALLOCATING;
++					dev->allocationBlock = blk;
++					dev->allocationPage = c;
++					dev->allocationBlockFinder = blk;	
++					/* Set it to here to encourage the allocator to go forth from here. */
++					
++					/* Yaffs2 sanity check:
++					 * This should be the one with the highest sequence number
++					 */
++					if (dev->isYaffs2
++					    && (dev->sequenceNumber !=
++						bi->sequenceNumber)) {
++						T(YAFFS_TRACE_ALWAYS,
++						  (TSTR
++						   ("yaffs: Allocation block %d was not highest sequence id:"
++						    " block seq = %d, dev seq = %d"
++						    TENDSTR), blk,bi->sequenceNumber,dev->sequenceNumber));
++					}
++				}
++
++				dev->nFreeChunks += (dev->nChunksPerBlock - c);
++			} else if (tags.chunkId > 0) {
++				/* chunkId > 0 so it is a data chunk... */
++				unsigned int endpos;
++
++				yaffs_SetChunkBit(dev, blk, c);
++				bi->pagesInUse++;
++
++				in = yaffs_FindOrCreateObjectByNumber(dev,
++								      tags.
++								      objectId,
++								      YAFFS_OBJECT_TYPE_FILE);
++				/* PutChunkIntoFile checks for a clash (two data chunks with
++				 * the same chunkId).
++				 */
++				 
++				if(!in)
++					alloc_failed = 1;
++
++				if(in){
++					if(!yaffs_PutChunkIntoFile(in, tags.chunkId, chunk,1))
++						alloc_failed = 1;
++				}
++				
++				endpos =
++				    (tags.chunkId - 1) * dev->nDataBytesPerChunk +
++				    tags.byteCount;
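++				/* Illustrative example (assuming 2048-byte data chunks): a
++				 * data chunk with chunkId = 3 and byteCount = 500 implies the
++				 * file extends to at least byte 2 * 2048 + 500 = 4596.
++				 */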
++				if (in && 
++				    in->variantType == YAFFS_OBJECT_TYPE_FILE
++				    && in->variant.fileVariant.scannedFileSize <
++				    endpos) {
++					in->variant.fileVariant.
++					    scannedFileSize = endpos;
++					if (!dev->useHeaderFileSize) {
++						in->variant.fileVariant.
++						    fileSize =
++						    in->variant.fileVariant.
++						    scannedFileSize;
++					}
++
++				}
++				/* T((" %d %d data %d %d\n",blk,c,tags.objectId,tags.chunkId));   */
++			} else {
++				/* chunkId == 0, so it is an ObjectHeader.
++				 * Thus, we read in the object header and make the object
++				 */
++				yaffs_SetChunkBit(dev, blk, c);
++				bi->pagesInUse++;
++
++				result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk,
++								chunkData,
++								NULL);
++
++				oh = (yaffs_ObjectHeader *) chunkData;
++
++				in = yaffs_FindObjectByNumber(dev,
++							      tags.objectId);
++				if (in && in->variantType != oh->type) {
++					/* This should not happen, but somehow
++					 * we've ended up with an objectId that has been reused but not yet
++					 * deleted, and worse still it has changed type. Delete the old object.
++					 */
++
++					yaffs_DestroyObject(in);
++
++					in = 0;
++				}
++
++				in = yaffs_FindOrCreateObjectByNumber(dev,
++								      tags.
++								      objectId,
++								      oh->type);
++
++				if(!in)
++					alloc_failed = 1;
++					
++				if (in && oh->shadowsObject > 0) {
++					yaffs_HandleShadowedObject(dev,
++								   oh->
++								   shadowsObject,
++								   0);
++				}
++
++				if (in && in->valid) {
++					/* We have already filled this one. We have a duplicate and need to resolve it. */
++
++					unsigned existingSerial = in->serial;
++					unsigned newSerial = tags.serialNumber;
++
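++					/* Illustrative example (yaffs1): an existing header
++					 * with serial 3 is superseded by a new header with
++					 * serial 0, since serial numbers wrap modulo 4.
++					 */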
++					if (dev->isYaffs2 ||
++					    ((existingSerial + 1) & 3) ==
++					    newSerial) {
++						/* Use new one - destroy the existing one */
++						yaffs_DeleteChunk(dev,
++								  in->chunkId,
++								  1, __LINE__);
++						in->valid = 0;
++					} else {
++						/* Use existing - destroy this one. */
++						yaffs_DeleteChunk(dev, chunk, 1,
++								  __LINE__);
++					}
++				}
++
++				if (in && !in->valid &&
++				    (tags.objectId == YAFFS_OBJECTID_ROOT ||
++				     tags.objectId == YAFFS_OBJECTID_LOSTNFOUND)) {
++					/* We only load some info, don't fiddle with directory structure */
++					in->valid = 1;
++					in->variantType = oh->type;
++
++					in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++					in->win_atime[0] = oh->win_atime[0];
++					in->win_ctime[0] = oh->win_ctime[0];
++					in->win_mtime[0] = oh->win_mtime[0];
++					in->win_atime[1] = oh->win_atime[1];
++					in->win_ctime[1] = oh->win_ctime[1];
++					in->win_mtime[1] = oh->win_mtime[1];
++#else
++					in->yst_uid = oh->yst_uid;
++					in->yst_gid = oh->yst_gid;
++					in->yst_atime = oh->yst_atime;
++					in->yst_mtime = oh->yst_mtime;
++					in->yst_ctime = oh->yst_ctime;
++					in->yst_rdev = oh->yst_rdev;
++#endif
++					in->chunkId = chunk;
++
++				} else if (in && !in->valid) {
++					/* we need to load this info */
++
++					in->valid = 1;
++					in->variantType = oh->type;
++
++					in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++					in->win_atime[0] = oh->win_atime[0];
++					in->win_ctime[0] = oh->win_ctime[0];
++					in->win_mtime[0] = oh->win_mtime[0];
++					in->win_atime[1] = oh->win_atime[1];
++					in->win_ctime[1] = oh->win_ctime[1];
++					in->win_mtime[1] = oh->win_mtime[1];
++#else
++					in->yst_uid = oh->yst_uid;
++					in->yst_gid = oh->yst_gid;
++					in->yst_atime = oh->yst_atime;
++					in->yst_mtime = oh->yst_mtime;
++					in->yst_ctime = oh->yst_ctime;
++					in->yst_rdev = oh->yst_rdev;
++#endif
++					in->chunkId = chunk;
++
++					yaffs_SetObjectName(in, oh->name);
++					in->dirty = 0;
++
++					/* directory stuff...
++					 * hook up to parent
++					 */
++
++					parent =
++					    yaffs_FindOrCreateObjectByNumber
++					    (dev, oh->parentObjectId,
++					     YAFFS_OBJECT_TYPE_DIRECTORY);
++					if (parent->variantType ==
++					    YAFFS_OBJECT_TYPE_UNKNOWN) {
++						/* Set up as a directory */
++						parent->variantType =
++						    YAFFS_OBJECT_TYPE_DIRECTORY;
++						INIT_LIST_HEAD(&parent->variant.
++							       directoryVariant.
++							       children);
++					} else if (parent->variantType !=
++						   YAFFS_OBJECT_TYPE_DIRECTORY)
++					{
++						/* Hoosterman, another problem....
++						 * We're trying to use a non-directory as a directory
++						 */
++
++						T(YAFFS_TRACE_ERROR,
++						  (TSTR
++						   ("yaffs tragedy: attempting to use non-directory as"
++						    " a directory in scan. Put in lost+found."
++						    TENDSTR)));
++						parent = dev->lostNFoundDir;
++					}
++
++					yaffs_AddObjectToDirectory(parent, in);
++
++					if (0 && (parent == dev->deletedDir ||
++						  parent == dev->unlinkedDir)) {
++						in->deleted = 1;	/* If it is unlinked at start up then it wants deleting */
++						dev->nDeletedFiles++;
++					}
++					/* Note re hardlinks.
++					 * Since we might scan a hardlink before its equivalent object is scanned
++					 * we put them all in a list.
++					 * After scanning is complete, we should have all the objects, so we run through this
++					 * list and fix up all the chains.              
++					 */
++
++					switch (in->variantType) {
++					case YAFFS_OBJECT_TYPE_UNKNOWN:	
++						/* Todo got a problem */
++						break;
++					case YAFFS_OBJECT_TYPE_FILE:
++						if (dev->isYaffs2
++						    && oh->isShrink) {
++							/* Prune back the shrunken chunks */
++							yaffs_PruneResizedChunks
++							    (in, oh->fileSize);
++							/* Mark the block as having a shrinkHeader */
++							bi->hasShrinkHeader = 1;
++						}
++
++						if (dev->useHeaderFileSize)
++
++							in->variant.fileVariant.
++							    fileSize =
++							    oh->fileSize;
++
++						break;
++					case YAFFS_OBJECT_TYPE_HARDLINK:
++						in->variant.hardLinkVariant.
++						    equivalentObjectId =
++						    oh->equivalentObjectId;
++						in->hardLinks.next =
++						    (struct list_head *)
++						    hardList;
++						hardList = in;
++						break;
++					case YAFFS_OBJECT_TYPE_DIRECTORY:
++						/* Do nothing */
++						break;
++					case YAFFS_OBJECT_TYPE_SPECIAL:
++						/* Do nothing */
++						break;
++					case YAFFS_OBJECT_TYPE_SYMLINK:	
++						in->variant.symLinkVariant.alias =
++						    yaffs_CloneString(oh->alias);
++						if(!in->variant.symLinkVariant.alias)
++							alloc_failed = 1;
++						break;
++					}
++
++					if (parent == dev->deletedDir) {
++						yaffs_DestroyObject(in);
++						bi->hasShrinkHeader = 1;
++					}
++				}
++			}
++		}
++
++		if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++			/* If we got this far while scanning, then the block is fully allocated.*/
++			state = YAFFS_BLOCK_STATE_FULL;
++		}
++
++		bi->blockState = state;
++
++		/* Now let's see if it was dirty */
++		if (bi->pagesInUse == 0 &&
++		    !bi->hasShrinkHeader &&
++		    bi->blockState == YAFFS_BLOCK_STATE_FULL) {
++			yaffs_BlockBecameDirty(dev, blk);
++		}
++
++	}
++
++	if (blockIndex) {
++		YFREE(blockIndex);
++	}
++	
++	
++	/* Ok, we've done all the scanning.
++	 * Fix up the hard link chains.
++	 * We should now have scanned all the objects, so it's time to add these
++	 * hardlinks.
++	 */
++
++	yaffs_HardlinkFixup(dev,hardList);
++
++	/* Handle the unlinked files. Since they were left in an unlinked state we should
++	 * just delete them.
++	 */
++	{
++		struct list_head *i;
++		struct list_head *n;
++
++		yaffs_Object *l;
++		/* Soft delete all the unlinked files */
++		list_for_each_safe(i, n,
++				   &dev->unlinkedDir->variant.directoryVariant.
++				   children) {
++			if (i) {
++				l = list_entry(i, yaffs_Object, siblings);
++				yaffs_DestroyObject(l);
++			}
++		}
++	}
++
++	yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
++
++	if(alloc_failed){
++		return YAFFS_FAIL;
++	}
++	
++	T(YAFFS_TRACE_SCAN, (TSTR("yaffs_Scan ends" TENDSTR)));
++	
++
++	return YAFFS_OK;
++}
++
++static void yaffs_CheckObjectDetailsLoaded(yaffs_Object *in)
++{
++	__u8 *chunkData;
++	yaffs_ObjectHeader *oh;
++	yaffs_Device *dev = in->myDev;
++	yaffs_ExtendedTags tags;
++	int result;
++	int alloc_failed = 0;
++
++	if(!in)
++		return;
++		
++#if 0
++	T(YAFFS_TRACE_SCAN,(TSTR("details for object %d %s loaded" TENDSTR),
++		in->objectId,
++		in->lazyLoaded ? "not yet" : "already"));
++#endif
++
++	if(in->lazyLoaded){
++		in->lazyLoaded = 0;
++		chunkData = yaffs_GetTempBuffer(dev, __LINE__);
++
++		result = yaffs_ReadChunkWithTagsFromNAND(dev,in->chunkId,chunkData,&tags);
++		oh = (yaffs_ObjectHeader *) chunkData;		
++
++		in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++		in->win_atime[0] = oh->win_atime[0];
++		in->win_ctime[0] = oh->win_ctime[0];
++		in->win_mtime[0] = oh->win_mtime[0];
++		in->win_atime[1] = oh->win_atime[1];
++		in->win_ctime[1] = oh->win_ctime[1];
++		in->win_mtime[1] = oh->win_mtime[1];
++#else
++		in->yst_uid = oh->yst_uid;
++		in->yst_gid = oh->yst_gid;
++		in->yst_atime = oh->yst_atime;
++		in->yst_mtime = oh->yst_mtime;
++		in->yst_ctime = oh->yst_ctime;
++		in->yst_rdev = oh->yst_rdev;
++		
++#endif
++		yaffs_SetObjectName(in, oh->name);
++		
++		if(in->variantType == YAFFS_OBJECT_TYPE_SYMLINK){
++			 in->variant.symLinkVariant.alias =
++						    yaffs_CloneString(oh->alias);
++			if(!in->variant.symLinkVariant.alias)
++				alloc_failed = 1; /* Not returned to caller */
++		}
++						    
++		yaffs_ReleaseTempBuffer(dev,chunkData, __LINE__);
++	}
++}
++
++static int yaffs_ScanBackwards(yaffs_Device * dev)
++{
++	yaffs_ExtendedTags tags;
++	int blk;
++	int blockIterator;
++	int startIterator;
++	int endIterator;
++	int nBlocksToScan = 0;
++
++	int chunk;
++	int result;
++	int c;
++	int deleted;
++	yaffs_BlockState state;
++	yaffs_Object *hardList = NULL;
++	yaffs_BlockInfo *bi;
++	int sequenceNumber;
++	yaffs_ObjectHeader *oh;
++	yaffs_Object *in;
++	yaffs_Object *parent;
++	int nBlocks = dev->internalEndBlock - dev->internalStartBlock + 1;
++	int itsUnlinked;
++	__u8 *chunkData;
++	
++	int fileSize;
++	int isShrink;
++	int foundChunksInBlock;
++	int equivalentObjectId;
++	int alloc_failed = 0;
++	
++
++	yaffs_BlockIndex *blockIndex = NULL;
++	int altBlockIndex = 0;
++
++	if (!dev->isYaffs2) {
++		T(YAFFS_TRACE_SCAN,
++		  (TSTR("yaffs_ScanBackwards is only for YAFFS2!" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++
++	T(YAFFS_TRACE_SCAN,
++	  (TSTR
++	   ("yaffs_ScanBackwards starts  intstartblk %d intendblk %d..."
++	    TENDSTR), dev->internalStartBlock, dev->internalEndBlock));
++
++
++	dev->sequenceNumber = YAFFS_LOWEST_SEQUENCE_NUMBER;
++
++	blockIndex = YMALLOC(nBlocks * sizeof(yaffs_BlockIndex));
++	
++	if(!blockIndex) {
++		blockIndex = YMALLOC_ALT(nBlocks * sizeof(yaffs_BlockIndex));
++		altBlockIndex = 1;
++	}
++	
++	if(!blockIndex) {
++		T(YAFFS_TRACE_SCAN,
++		  (TSTR("yaffs_Scan() could not allocate block index!" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++	
++	dev->blocksInCheckpoint = 0;
++	
++	chunkData = yaffs_GetTempBuffer(dev, __LINE__);
++
++	/* Scan all the blocks to determine their state */
++	for (blk = dev->internalStartBlock; blk <= dev->internalEndBlock; blk++) {
++		bi = yaffs_GetBlockInfo(dev, blk);
++		yaffs_ClearChunkBits(dev, blk);
++		bi->pagesInUse = 0;
++		bi->softDeletions = 0;
++
++		yaffs_QueryInitialBlockState(dev, blk, &state, &sequenceNumber);
++
++		bi->blockState = state;
++		bi->sequenceNumber = sequenceNumber;
++
++		if(bi->sequenceNumber == YAFFS_SEQUENCE_CHECKPOINT_DATA)
++			bi->blockState = state = YAFFS_BLOCK_STATE_CHECKPOINT;
++			
++		T(YAFFS_TRACE_SCAN_DEBUG,
++		  (TSTR("Block scanning block %d state %d seq %d" TENDSTR), blk,
++		   state, sequenceNumber));
++
++		
++		if(state == YAFFS_BLOCK_STATE_CHECKPOINT){
++			dev->blocksInCheckpoint++;
++			
++		} else if (state == YAFFS_BLOCK_STATE_DEAD) {
++			T(YAFFS_TRACE_BAD_BLOCKS,
++			  (TSTR("block %d is bad" TENDSTR), blk));
++		} else if (state == YAFFS_BLOCK_STATE_EMPTY) {
++			T(YAFFS_TRACE_SCAN_DEBUG,
++			  (TSTR("Block empty " TENDSTR)));
++			dev->nErasedBlocks++;
++			dev->nFreeChunks += dev->nChunksPerBlock;
++		} else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++
++			/* Determine the highest sequence number */
++			if (dev->isYaffs2 &&
++			    sequenceNumber >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
++			    sequenceNumber < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
++
++				blockIndex[nBlocksToScan].seq = sequenceNumber;
++				blockIndex[nBlocksToScan].block = blk;
++
++				nBlocksToScan++;
++
++				if (sequenceNumber >= dev->sequenceNumber) {
++					dev->sequenceNumber = sequenceNumber;
++				}
++			} else if (dev->isYaffs2) {
++				/* TODO: Nasty sequence number! */
++				T(YAFFS_TRACE_SCAN,
++				  (TSTR
++				   ("Block scanning block %d has bad sequence number %d"
++				    TENDSTR), blk, sequenceNumber));
++
++			}
++		}
++	}
++
++	T(YAFFS_TRACE_SCAN,
++	(TSTR("%d blocks to be sorted..." TENDSTR), nBlocksToScan));
++
++
++
++	YYIELD();
++
++	/* Sort the blocks */
++#ifndef CONFIG_YAFFS_USE_OWN_SORT
++	yaffs_qsort(blockIndex, nBlocksToScan,
++		sizeof(yaffs_BlockIndex), ybicmp);
++#else
++	{
++	 	/* Dungy old bubble sort... */
++	 	
++		yaffs_BlockIndex temp;
++		int i;
++		int j;
++
++		for (i = 0; i < nBlocksToScan; i++)
++			for (j = i + 1; j < nBlocksToScan; j++)
++				if (blockIndex[i].seq > blockIndex[j].seq) {
++					temp = blockIndex[j];
++					blockIndex[j] = blockIndex[i];
++					blockIndex[i] = temp;
++				}
++	}
++#endif
++
++	YYIELD();
++
++    	T(YAFFS_TRACE_SCAN, (TSTR("...done" TENDSTR)));
++
++	/* Now scan the blocks looking at the data. */
++	startIterator = 0;
++	endIterator = nBlocksToScan - 1;
++	T(YAFFS_TRACE_SCAN_DEBUG,
++	  (TSTR("%d blocks to be scanned" TENDSTR), nBlocksToScan));
++
++	/* For each block.... backwards */
++	for (blockIterator = endIterator; !alloc_failed && blockIterator >= startIterator;
++	     blockIterator--) {
++	        /* Cooperative multitasking! This loop can run for so
++		   long that watchdog timers expire. */
++	        YYIELD();
++
++		/* get the block to scan in the correct order */
++		blk = blockIndex[blockIterator].block;
++
++		bi = yaffs_GetBlockInfo(dev, blk);
++		
++		
++		state = bi->blockState;
++
++		deleted = 0;
++
++		/* For each chunk in each block that needs scanning.... */
++		foundChunksInBlock = 0;
++		for (c = dev->nChunksPerBlock - 1; 
++		     !alloc_failed && c >= 0 &&
++		     (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
++		      state == YAFFS_BLOCK_STATE_ALLOCATING); c--) {
++			/* Scan backwards... 
++			 * Read the tags and decide what to do
++			 */
++			
++			chunk = blk * dev->nChunksPerBlock + c;
++
++			result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
++							&tags);
++
++			/* Let's have a good look at this chunk... */
++
++			if (!tags.chunkUsed) {
++				/* An unassigned chunk in the block.
++				 * If there are used chunks after this one, then
++				 * it is a chunk that was skipped due to failing the erased
++				 * check. Just skip it so that it can be deleted.
++				 * But, more typically, we get here when this is an unallocated
++				 * chunk and this means that either the block is empty or
++				 * this is the one being allocated from
++				 */
++
++				if(foundChunksInBlock)
++				{
++					/* This is a chunk that was skipped due to failing the erased check */
++					
++				} else if (c == 0) {
++					/* We're looking at the first chunk in the block so the block is unused */
++					state = YAFFS_BLOCK_STATE_EMPTY;
++					dev->nErasedBlocks++;
++				} else {
++					if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
++					    state == YAFFS_BLOCK_STATE_ALLOCATING) {
++					    	if(dev->sequenceNumber == bi->sequenceNumber) {
++							/* this is the block being allocated from */
++					    	
++							T(YAFFS_TRACE_SCAN,
++							  (TSTR
++							   (" Allocating from %d %d"
++							    TENDSTR), blk, c));
++
++							state = YAFFS_BLOCK_STATE_ALLOCATING;
++							dev->allocationBlock = blk;
++							dev->allocationPage = c;
++							dev->allocationBlockFinder = blk;	
++						}
++						else {
++							/* This is a partially written block that is not
++							 * the current allocation block. This block must have
++							 * had a write failure, so set up for retirement.
++							 */
++						  
++							 bi->needsRetiring = 1;
++							 bi->gcPrioritise = 1;
++							 						 
++							 T(YAFFS_TRACE_ALWAYS,
++							 (TSTR("Partially written block %d being set for retirement" TENDSTR),
++							 blk));
++						}
++
++					}
++					 
++				}
++
++				dev->nFreeChunks++;
++				
++			} else if (tags.chunkId > 0) {
++				/* chunkId > 0 so it is a data chunk... */
++				unsigned int endpos;
++				__u32 chunkBase =
++				    (tags.chunkId - 1) * dev->nDataBytesPerChunk;
++								
++				foundChunksInBlock = 1;
++
++
++				yaffs_SetChunkBit(dev, blk, c);
++				bi->pagesInUse++;
++
++				in = yaffs_FindOrCreateObjectByNumber(dev,
++								      tags.
++								      objectId,
++								      YAFFS_OBJECT_TYPE_FILE);
++				if(!in){
++					/* Out of memory */
++					alloc_failed = 1;
++				}
++				
++				if (in &&
++				    in->variantType == YAFFS_OBJECT_TYPE_FILE
++				    && chunkBase <
++				    in->variant.fileVariant.shrinkSize) {
++					/* This has not been invalidated by a resize */
++					if(!yaffs_PutChunkIntoFile(in, tags.chunkId,
++							       chunk, -1)){
++						alloc_failed = 1;
++					}
++
++					/* File size is calculated by looking at the data chunks if we have not 
++					 * seen an object header yet. Stop this practice once we find an object header.
++					 */
++					endpos =
++					    (tags.chunkId -
++					     1) * dev->nDataBytesPerChunk +
++					    tags.byteCount;
++					    
++					if (!in->valid &&	/* have not got an object header yet */
++					    in->variant.fileVariant.
++					    scannedFileSize < endpos) {
++						in->variant.fileVariant.
++						    scannedFileSize = endpos;
++						in->variant.fileVariant.
++						    fileSize =
++						    in->variant.fileVariant.
++						    scannedFileSize;
++					}
++
++				} else if(in) {
++					/* This chunk has been invalidated by a resize, so delete */
++					yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
++
++				}
++			} else {
++				/* chunkId == 0, so it is an ObjectHeader.
++				 * Thus, we read in the object header and make the object
++				 */
++				foundChunksInBlock = 1;
++
++				yaffs_SetChunkBit(dev, blk, c);
++				bi->pagesInUse++;
++
++				oh = NULL;
++				in = NULL;
++
++				if (tags.extraHeaderInfoAvailable) {
++					in = yaffs_FindOrCreateObjectByNumber
++					    (dev, tags.objectId,
++					     tags.extraObjectType);
++				}
++
++				if (!in ||
++#ifdef CONFIG_YAFFS_DISABLE_LAZY_LOAD
++				    !in->valid ||
++#endif
++				    tags.extraShadows ||
++				    (!in->valid &&
++				    (tags.objectId == YAFFS_OBJECTID_ROOT ||
++				     tags.objectId == YAFFS_OBJECTID_LOSTNFOUND))
++				    ) {
++
++					/* If we don't have valid info then we need to read the chunk.
++					 * TODO In future we can probably defer reading the chunk and
++					 * live with invalid data until it is needed.
++					 */
++
++					result = yaffs_ReadChunkWithTagsFromNAND(dev,
++									chunk,
++									chunkData,
++									NULL);
++
++					oh = (yaffs_ObjectHeader *) chunkData;
++
++					if (!in)
++						in = yaffs_FindOrCreateObjectByNumber(dev, tags.objectId, oh->type);
++
++				}
++
++				if (!in) {
++					/* TODO Hoosterman we have a problem! */
++					T(YAFFS_TRACE_ERROR,
++					  (TSTR
++					   ("yaffs tragedy: Could not make object for object  %d  "
++					    "at chunk %d during scan"
++					    TENDSTR), tags.objectId, chunk));
++
++				}
++
++				if (in->valid) {
++					/* We have already filled this one.
++					 * We have a duplicate that will be discarded, but 
++					 * we first have to suck out resize info if it is a file.
++					 */
++
++					if ((in->variantType == YAFFS_OBJECT_TYPE_FILE) && 
++					     ((oh && 
++					       oh-> type == YAFFS_OBJECT_TYPE_FILE)||
++					      (tags.extraHeaderInfoAvailable  &&
++					       tags.extraObjectType == YAFFS_OBJECT_TYPE_FILE))
++					    ) {
++						__u32 thisSize =
++						    (oh) ? oh->fileSize : tags.
++						    extraFileLength;
++						__u32 parentObjectId =
++						    (oh) ? oh->
++						    parentObjectId : tags.
++						    extraParentObjectId;
++						unsigned isShrink =
++						    (oh) ? oh->isShrink : tags.
++						    extraIsShrinkHeader;
++
++						/* If it is deleted (unlinked at start also means deleted)
++						 * we treat the file size as being zeroed at this point.
++						 */
++						if (parentObjectId ==
++						    YAFFS_OBJECTID_DELETED
++						    || parentObjectId ==
++						    YAFFS_OBJECTID_UNLINKED) {
++							thisSize = 0;
++							isShrink = 1;
++						}
++
++						if (isShrink &&
++						    in->variant.fileVariant.
++						    shrinkSize > thisSize) {
++							in->variant.fileVariant.
++							    shrinkSize =
++							    thisSize;
++						}
++
++						if (isShrink) {
++							bi->hasShrinkHeader = 1;
++						}
++
++					}
++					/* Use existing - destroy this one. */
++					yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
++
++				}
++
++				if (!in->valid &&
++				    (tags.objectId == YAFFS_OBJECTID_ROOT ||
++				     tags.objectId ==
++				     YAFFS_OBJECTID_LOSTNFOUND)) {
++					/* We only load some info, don't fiddle with directory structure */
++					in->valid = 1;
++					
++					if(oh) {
++						in->variantType = oh->type;
++
++						in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++						in->win_atime[0] = oh->win_atime[0];
++						in->win_ctime[0] = oh->win_ctime[0];
++						in->win_mtime[0] = oh->win_mtime[0];
++						in->win_atime[1] = oh->win_atime[1];
++						in->win_ctime[1] = oh->win_ctime[1];
++						in->win_mtime[1] = oh->win_mtime[1];
++#else
++						in->yst_uid = oh->yst_uid;
++						in->yst_gid = oh->yst_gid;
++						in->yst_atime = oh->yst_atime;
++						in->yst_mtime = oh->yst_mtime;
++						in->yst_ctime = oh->yst_ctime;
++						in->yst_rdev = oh->yst_rdev;
++		
++#endif
++					} else {
++						in->variantType = tags.extraObjectType;
++						in->lazyLoaded = 1;
++					}
++						
++					in->chunkId = chunk;
++
++				} else if (!in->valid) {
++					/* we need to load this info */
++
++					in->valid = 1;
++					in->chunkId = chunk;
++					
++					if(oh) {
++						in->variantType = oh->type;
++
++						in->yst_mode = oh->yst_mode;
++#ifdef CONFIG_YAFFS_WINCE
++						in->win_atime[0] = oh->win_atime[0];
++						in->win_ctime[0] = oh->win_ctime[0];
++						in->win_mtime[0] = oh->win_mtime[0];
++						in->win_atime[1] = oh->win_atime[1];
++						in->win_ctime[1] = oh->win_ctime[1];
++						in->win_mtime[1] = oh->win_mtime[1];
++#else
++						in->yst_uid = oh->yst_uid;
++						in->yst_gid = oh->yst_gid;
++						in->yst_atime = oh->yst_atime;
++						in->yst_mtime = oh->yst_mtime;
++						in->yst_ctime = oh->yst_ctime;
++						in->yst_rdev = oh->yst_rdev;
++#endif
++
++						if (oh->shadowsObject > 0) 
++							yaffs_HandleShadowedObject(dev,
++									   oh->
++									   shadowsObject,
++									   1);
++					
++
++						yaffs_SetObjectName(in, oh->name);
++						parent =
++						    yaffs_FindOrCreateObjectByNumber
++					    		(dev, oh->parentObjectId,
++					     		 YAFFS_OBJECT_TYPE_DIRECTORY);
++
++						 fileSize = oh->fileSize;
++ 						 isShrink = oh->isShrink;
++						 equivalentObjectId = oh->equivalentObjectId;
++
++					}
++					else {
++						in->variantType = tags.extraObjectType;
++						parent =
++						    yaffs_FindOrCreateObjectByNumber
++					    		(dev, tags.extraParentObjectId,
++					     		 YAFFS_OBJECT_TYPE_DIRECTORY);
++						 fileSize = tags.extraFileLength;
++						 isShrink = tags.extraIsShrinkHeader;
++						 equivalentObjectId = tags.extraEquivalentObjectId;
++						in->lazyLoaded = 1;
++
++					}
++					in->dirty = 0;
++
++					/* directory stuff...
++					 * hook up to parent
++					 */
++
++					if (parent->variantType ==
++					    YAFFS_OBJECT_TYPE_UNKNOWN) {
++						/* Set up as a directory */
++						parent->variantType =
++						    YAFFS_OBJECT_TYPE_DIRECTORY;
++						INIT_LIST_HEAD(&parent->variant.
++							       directoryVariant.
++							       children);
++					} else if (parent->variantType !=
++						   YAFFS_OBJECT_TYPE_DIRECTORY)
++					{
++						/* Hoosterman, another problem....
++						 * We're trying to use a non-directory as a directory
++						 */
++
++						T(YAFFS_TRACE_ERROR,
++						  (TSTR
++						   ("yaffs tragedy: attempting to use non-directory as"
++						    " a directory in scan. Put in lost+found."
++						    TENDSTR)));
++						parent = dev->lostNFoundDir;
++					}
++
++					yaffs_AddObjectToDirectory(parent, in);
++
++					itsUnlinked = (parent == dev->deletedDir) ||
++						      (parent == dev->unlinkedDir);
++
++					if (isShrink) {
++						/* Mark the block as having a shrinkHeader */
++						bi->hasShrinkHeader = 1;
++					}
++
++					/* Note re hardlinks.
++					 * Since we might scan a hardlink before its equivalent object is scanned
++					 * we put them all in a list.
++					 * After scanning is complete, we should have all the objects, so we run
++					 * through this list and fix up all the chains.              
++					 */
++
++					switch (in->variantType) {
++					case YAFFS_OBJECT_TYPE_UNKNOWN:	
++						/* Todo got a problem */
++						break;
++					case YAFFS_OBJECT_TYPE_FILE:
++
++						if (in->variant.fileVariant.
++						    scannedFileSize < fileSize) {
++							/* This covers the case where the file size is greater
++							 * than where the data is
++							 * This will happen if the file is resized to be larger 
++							 * than its current data extents.
++							 */
++							in->variant.fileVariant.fileSize = fileSize;
++							in->variant.fileVariant.scannedFileSize =
++							    in->variant.fileVariant.fileSize;
++						}
++
++						if (isShrink &&
++						    in->variant.fileVariant.shrinkSize > fileSize) {
++							in->variant.fileVariant.shrinkSize = fileSize;
++						}
++
++						break;
++					case YAFFS_OBJECT_TYPE_HARDLINK:
++						if(!itsUnlinked) {
++						  in->variant.hardLinkVariant.equivalentObjectId =
++						    equivalentObjectId;
++						  in->hardLinks.next =
++						    (struct list_head *) hardList;
++						  hardList = in;
++						}
++						break;
++					case YAFFS_OBJECT_TYPE_DIRECTORY:
++						/* Do nothing */
++						break;
++					case YAFFS_OBJECT_TYPE_SPECIAL:
++						/* Do nothing */
++						break;
++					case YAFFS_OBJECT_TYPE_SYMLINK:
++						if(oh){
++						   in->variant.symLinkVariant.alias =
++						    yaffs_CloneString(oh->
++								      alias);
++						   if(!in->variant.symLinkVariant.alias)
++						   	alloc_failed = 1;
++						}
++						break;
++					}
++
++				}
++				
++			}
++
++		} /* End of scanning for each chunk */
++
++		if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
++			/* If we got this far while scanning, then the block is fully allocated. */
++			state = YAFFS_BLOCK_STATE_FULL;
++		}
++
++		bi->blockState = state;
++
++		/* Now let's see if it was dirty */
++		if (bi->pagesInUse == 0 &&
++		    !bi->hasShrinkHeader &&
++		    bi->blockState == YAFFS_BLOCK_STATE_FULL) {
++			yaffs_BlockBecameDirty(dev, blk);
++		}
++
++	}
++
++	if (altBlockIndex) 
++		YFREE_ALT(blockIndex);
++	else
++		YFREE(blockIndex);
++	
++	/* Ok, we've done all the scanning.
++	 * Fix up the hard link chains.
++	 * We should now have scanned all the objects, so it's time to add these
++	 * hardlinks.
++	 */
++	yaffs_HardlinkFixup(dev,hardList);
++	
++	
++	/*
++	*  Sort out state of unlinked and deleted objects.
++	*/
++	{
++		struct list_head *i;
++		struct list_head *n;
++
++		yaffs_Object *l;
++
++		/* Soft delete all the unlinked files */
++		list_for_each_safe(i, n,
++				   &dev->unlinkedDir->variant.directoryVariant.
++				   children) {
++			if (i) {
++				l = list_entry(i, yaffs_Object, siblings);
++				yaffs_DestroyObject(l);
++			}
++		}
++
++		/* Soft delete all the deletedDir files */
++		list_for_each_safe(i, n,
++				   &dev->deletedDir->variant.directoryVariant.
++				   children) {
++			if (i) {
++				l = list_entry(i, yaffs_Object, siblings);
++				yaffs_DestroyObject(l);
++
++			}
++		}
++	}
++
++	yaffs_ReleaseTempBuffer(dev, chunkData, __LINE__);
++	
++	if(alloc_failed){
++		return YAFFS_FAIL;
++	}
++
++	T(YAFFS_TRACE_SCAN, (TSTR("yaffs_ScanBackwards ends" TENDSTR)));
++
++	return YAFFS_OK;
++}
++
++/*------------------------------  Directory Functions ----------------------------- */
++
++static void yaffs_RemoveObjectFromDirectory(yaffs_Object * obj)
++{
++	yaffs_Device *dev = obj->myDev;
++	
++	if(dev && dev->removeObjectCallback)
++		dev->removeObjectCallback(obj);
++	   
++	list_del_init(&obj->siblings);
++	obj->parent = NULL;
++}
++
++
++static void yaffs_AddObjectToDirectory(yaffs_Object * directory,
++				       yaffs_Object * obj)
++{
++
++	if (!directory) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("tragedy: Trying to add an object to a null pointer directory"
++		    TENDSTR)));
++		YBUG();
++	}
++	if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("tragedy: Trying to add an object to a non-directory"
++		    TENDSTR)));
++		YBUG();
++	}
++
++	if (obj->siblings.prev == NULL) {
++		/* Not initialised */
++		INIT_LIST_HEAD(&obj->siblings);
++
++	} else if (!list_empty(&obj->siblings)) {
++		/* If it is holed up somewhere else, unhook it */
++		yaffs_RemoveObjectFromDirectory(obj);
++	}
++	/* Now add it */
++	list_add(&obj->siblings, &directory->variant.directoryVariant.children);
++	obj->parent = directory;
++
++	if (directory == obj->myDev->unlinkedDir
++	    || directory == obj->myDev->deletedDir) {
++		obj->unlinked = 1;
++		obj->myDev->nUnlinkedFiles++;
++		obj->renameAllowed = 0;
++	}
++}
++
++yaffs_Object *yaffs_FindObjectByName(yaffs_Object * directory,
++				     const YCHAR * name)
++{
++	int sum;
++
++	struct list_head *i;
++	YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
++
++	yaffs_Object *l;
++
++	if (!name) {
++		return NULL;
++	}
++
++	if (!directory) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("tragedy: yaffs_FindObjectByName: null pointer directory"
++		    TENDSTR)));
++		YBUG();
++	}
++	if (directory->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("tragedy: yaffs_FindObjectByName: non-directory" TENDSTR)));
++		YBUG();
++	}
++
++	sum = yaffs_CalcNameSum(name);
++
++	list_for_each(i, &directory->variant.directoryVariant.children) {
++		if (i) {
++			l = list_entry(i, yaffs_Object, siblings);
++			
++			yaffs_CheckObjectDetailsLoaded(l);
++
++			/* Special case for lost-n-found */
++			if (l->objectId == YAFFS_OBJECTID_LOSTNFOUND) {
++				if (yaffs_strcmp(name, YAFFS_LOSTNFOUND_NAME) == 0) {
++					return l;
++				}
++			} else if (yaffs_SumCompare(l->sum, sum) || l->chunkId <= 0)	
++			{
++				/* LostnFound chunk called Objxxx
++				 * Do a real check
++				 */
++				yaffs_GetObjectName(l, buffer,
++						    YAFFS_MAX_NAME_LENGTH);
++				if (yaffs_strncmp(name, buffer,YAFFS_MAX_NAME_LENGTH) == 0) {
++					return l;
++				}
++
++			}
++		}
++	}
++
++	return NULL;
++}
++
++
++#if 0
++int yaffs_ApplyToDirectoryChildren(yaffs_Object * theDir,
++				   int (*fn) (yaffs_Object *))
++{
++	struct list_head *i;
++	yaffs_Object *l;
++
++	if (!theDir) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("tragedy: yaffs_FindObjectByName: null pointer directory"
++		    TENDSTR)));
++		YBUG();
++	}
++	if (theDir->variantType != YAFFS_OBJECT_TYPE_DIRECTORY) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("tragedy: yaffs_FindObjectByName: non-directory" TENDSTR)));
++		YBUG();
++	}
++
++	list_for_each(i, &theDir->variant.directoryVariant.children) {
++		if (i) {
++			l = list_entry(i, yaffs_Object, siblings);
++			if (l && !fn(l)) {
++				return YAFFS_FAIL;
++			}
++		}
++	}
++
++	return YAFFS_OK;
++
++}
++#endif
++
++/* GetEquivalentObject dereferences any hard links to get to the
++ * actual object.
++ */
++
++yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object * obj)
++{
++	if (obj && obj->variantType == YAFFS_OBJECT_TYPE_HARDLINK) {
++		/* We want the object id of the equivalent object, not this one */
++		obj = obj->variant.hardLinkVariant.equivalentObject;
++		yaffs_CheckObjectDetailsLoaded(obj);
++	}
++	return obj;
++
++}
++
++int yaffs_GetObjectName(yaffs_Object * obj, YCHAR * name, int buffSize)
++{
++	memset(name, 0, buffSize * sizeof(YCHAR));
++	
++	yaffs_CheckObjectDetailsLoaded(obj);
++
++	if (obj->objectId == YAFFS_OBJECTID_LOSTNFOUND) {
++		yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffSize - 1);
++	} else if (obj->chunkId <= 0) {
++		YCHAR locName[20];
++		/* make up a name */
++		yaffs_sprintf(locName, _Y("%s%d"), YAFFS_LOSTNFOUND_PREFIX,
++			      obj->objectId);
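++		/* Illustrative example: an object with objectId 42 and no header
++		 * chunk gets a made-up name of the form YAFFS_LOSTNFOUND_PREFIX
++		 * followed by "42".
++		 */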
++		yaffs_strncpy(name, locName, buffSize - 1);
++
++	}
++#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
++	else if (obj->shortName[0]) {
++		yaffs_strcpy(name, obj->shortName);
++	}
++#endif
++	else {
++		int result;
++		__u8 *buffer = yaffs_GetTempBuffer(obj->myDev, __LINE__);
++
++		yaffs_ObjectHeader *oh = (yaffs_ObjectHeader *) buffer;
++
++		memset(buffer, 0, obj->myDev->nDataBytesPerChunk);
++
++		if (obj->chunkId >= 0) {
++			result = yaffs_ReadChunkWithTagsFromNAND(obj->myDev,
++							obj->chunkId, buffer,
++							NULL);
++		}
++		yaffs_strncpy(name, oh->name, buffSize - 1);
++
++		yaffs_ReleaseTempBuffer(obj->myDev, buffer, __LINE__);
++	}
++
++	return yaffs_strlen(name);
++}
++
++int yaffs_GetObjectFileLength(yaffs_Object * obj)
++{
++
++	/* Dereference any hard linking */
++	obj = yaffs_GetEquivalentObject(obj);
++
++	if (obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
++		return obj->variant.fileVariant.fileSize;
++	}
++	if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK) {
++		return yaffs_strlen(obj->variant.symLinkVariant.alias);
++	} else {
++		/* Only a directory should drop through to here */
++		return obj->myDev->nDataBytesPerChunk;
++	}
++}
++
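++/* Illustrative example: an object that is not unlinked and has two entries on
++ * its hardLinks list reports a link count of 3 (the object itself plus the
++ * two hardlinks).
++ */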
++int yaffs_GetObjectLinkCount(yaffs_Object * obj)
++{
++	int count = 0;
++	struct list_head *i;
++
++	if (!obj->unlinked) {
++		count++;	/* the object itself */
++	}
++	list_for_each(i, &obj->hardLinks) {
++		count++;	/* add the hard links; */
++	}
++	return count;
++
++}
++
++int yaffs_GetObjectInode(yaffs_Object * obj)
++{
++	obj = yaffs_GetEquivalentObject(obj);
++
++	return obj->objectId;
++}
++
++unsigned yaffs_GetObjectType(yaffs_Object * obj)
++{
++	obj = yaffs_GetEquivalentObject(obj);
++
++	switch (obj->variantType) {
++	case YAFFS_OBJECT_TYPE_FILE:
++		return DT_REG;
++		break;
++	case YAFFS_OBJECT_TYPE_DIRECTORY:
++		return DT_DIR;
++		break;
++	case YAFFS_OBJECT_TYPE_SYMLINK:
++		return DT_LNK;
++		break;
++	case YAFFS_OBJECT_TYPE_HARDLINK:
++		return DT_REG;
++		break;
++	case YAFFS_OBJECT_TYPE_SPECIAL:
++		if (S_ISFIFO(obj->yst_mode))
++			return DT_FIFO;
++		if (S_ISCHR(obj->yst_mode))
++			return DT_CHR;
++		if (S_ISBLK(obj->yst_mode))
++			return DT_BLK;
++		if (S_ISSOCK(obj->yst_mode))
++			return DT_SOCK;
++	default:
++		return DT_REG;
++		break;
++	}
++}
++
++YCHAR *yaffs_GetSymlinkAlias(yaffs_Object * obj)
++{
++	obj = yaffs_GetEquivalentObject(obj);
++	if (obj->variantType == YAFFS_OBJECT_TYPE_SYMLINK) {
++		return yaffs_CloneString(obj->variant.symLinkVariant.alias);
++	} else {
++		return yaffs_CloneString(_Y(""));
++	}
++}
++
++#ifndef CONFIG_YAFFS_WINCE
++
++int yaffs_SetAttributes(yaffs_Object * obj, struct iattr *attr)
++{
++	unsigned int valid = attr->ia_valid;
++
++	if (valid & ATTR_MODE)
++		obj->yst_mode = attr->ia_mode;
++	if (valid & ATTR_UID)
++		obj->yst_uid = attr->ia_uid;
++	if (valid & ATTR_GID)
++		obj->yst_gid = attr->ia_gid;
++
++	if (valid & ATTR_ATIME)
++		obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
++	if (valid & ATTR_CTIME)
++		obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
++	if (valid & ATTR_MTIME)
++		obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
++
++	if (valid & ATTR_SIZE)
++		yaffs_ResizeFile(obj, attr->ia_size);
++
++	yaffs_UpdateObjectHeader(obj, NULL, 1, 0, 0);
++
++	return YAFFS_OK;
++
++}
++int yaffs_GetAttributes(yaffs_Object * obj, struct iattr *attr)
++{
++	unsigned int valid = 0;
++
++	attr->ia_mode = obj->yst_mode;
++	valid |= ATTR_MODE;
++	attr->ia_uid = obj->yst_uid;
++	valid |= ATTR_UID;
++	attr->ia_gid = obj->yst_gid;
++	valid |= ATTR_GID;
++
++	Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
++	valid |= ATTR_ATIME;
++	Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
++	valid |= ATTR_CTIME;
++	Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
++	valid |= ATTR_MTIME;
++
++	attr->ia_size = yaffs_GetFileSize(obj);
++	valid |= ATTR_SIZE;
++
++	attr->ia_valid = valid;
++
++	return YAFFS_OK;
++
++}
++
++#endif
++
++#if 0
++int yaffs_DumpObject(yaffs_Object * obj)
++{
++	YCHAR name[257];
++
++	yaffs_GetObjectName(obj, name, 256);
++
++	T(YAFFS_TRACE_ALWAYS,
++	  (TSTR
++	   ("Object %d, inode %d \"%s\"\n dirty %d valid %d serial %d sum %d"
++	    " chunk %d type %d size %d\n"
++	    TENDSTR), obj->objectId, yaffs_GetObjectInode(obj), name,
++	   obj->dirty, obj->valid, obj->serial, obj->sum, obj->chunkId,
++	   yaffs_GetObjectType(obj), yaffs_GetObjectFileLength(obj)));
++
++	return YAFFS_OK;
++}
++#endif
++
++/*---------------------------- Initialisation code -------------------------------------- */
++
++static int yaffs_CheckDevFunctions(const yaffs_Device * dev)
++{
++
++	/* Common functions, gotta have */
++	if (!dev->eraseBlockInNAND || !dev->initialiseNAND)
++		return 0;
++
++#ifdef CONFIG_YAFFS_YAFFS2
++
++	/* Can use the "with tags" style interface for yaffs1 or yaffs2 */
++	if (dev->writeChunkWithTagsToNAND &&
++	    dev->readChunkWithTagsFromNAND &&
++	    !dev->writeChunkToNAND &&
++	    !dev->readChunkFromNAND &&
++	    dev->markNANDBlockBad && dev->queryNANDBlock)
++		return 1;
++#endif
++
++	/* Can use the "spare" style interface for yaffs1 */
++	if (!dev->isYaffs2 &&
++	    !dev->writeChunkWithTagsToNAND &&
++	    !dev->readChunkWithTagsFromNAND &&
++	    dev->writeChunkToNAND &&
++	    dev->readChunkFromNAND &&
++	    !dev->markNANDBlockBad && !dev->queryNANDBlock)
++		return 1;
++
++	return 0;		/* bad */
++}
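(As an aside, the second combination accepted above, the yaffs1 "spare" style interface, can be satisfied with the nandmtd_* helpers declared later in this patch in yaffs_mtdif.h. A minimal sketch with driver context and error handling omitted; the example_* wrapper is hypothetical:)

	#include "yaffs_guts.h"
	#include "yaffs_mtdif.h"

	/* Wire up a yaffs1-style device so that yaffs_CheckDevFunctions()
	 * accepts it: raw chunk read/write plus erase/init, with the
	 * tagged interface left NULL.
	 */
	static void example_setup_yaffs1_mtd(yaffs_Device *dev, void *mtd)
	{
		dev->genericDevice     = mtd;	/* e.g. an mtd_info pointer */
		dev->writeChunkToNAND  = nandmtd_WriteChunkToNAND;
		dev->readChunkFromNAND = nandmtd_ReadChunkFromNAND;
		dev->eraseBlockInNAND  = nandmtd_EraseBlockInNAND;
		dev->initialiseNAND    = nandmtd_InitialiseNAND;
		dev->isYaffs2 = 0;	/* yaffs1 expects 512-byte data chunks */
	}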
++
++
++static int yaffs_CreateInitialDirectories(yaffs_Device *dev)
++{
++	/* Initialise the unlinked, deleted, root and lost and found directories */
++	
++	dev->lostNFoundDir = dev->rootDir =  NULL;
++	dev->unlinkedDir = dev->deletedDir = NULL;
++
++	dev->unlinkedDir =
++	    yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
++	
++	dev->deletedDir =
++	    yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
++
++	dev->rootDir =
++	    yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_ROOT,
++				      YAFFS_ROOT_MODE | S_IFDIR);
++	dev->lostNFoundDir =
++	    yaffs_CreateFakeDirectory(dev, YAFFS_OBJECTID_LOSTNFOUND,
++				      YAFFS_LOSTNFOUND_MODE | S_IFDIR);
++	
++	if(dev->lostNFoundDir && dev->rootDir && dev->unlinkedDir && dev->deletedDir){
++		yaffs_AddObjectToDirectory(dev->rootDir, dev->lostNFoundDir);
++		return YAFFS_OK;
++	}
++	
++	return YAFFS_FAIL;
++}
++
++int yaffs_GutsInitialise(yaffs_Device * dev)
++{
++	int init_failed = 0;
++	unsigned x;
++	int bits;
++
++	T(YAFFS_TRACE_TRACING, (TSTR("yaffs: yaffs_GutsInitialise()" TENDSTR)));
++
++	/* Check stuff that must be set */
++
++	if (!dev) {
++		T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Need a device" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++
++	dev->internalStartBlock = dev->startBlock;
++	dev->internalEndBlock = dev->endBlock;
++	dev->blockOffset = 0;
++	dev->chunkOffset = 0;
++	dev->nFreeChunks = 0;
++
++	if (dev->startBlock == 0) {
++		dev->internalStartBlock = dev->startBlock + 1;
++		dev->internalEndBlock = dev->endBlock + 1;
++		dev->blockOffset = 1;
++		dev->chunkOffset = dev->nChunksPerBlock;
++	}
++
++	/* Check geometry parameters. */
++
++	if ((dev->isYaffs2 && dev->nDataBytesPerChunk < 1024) || 
++	    (!dev->isYaffs2 && dev->nDataBytesPerChunk != 512) || 
++	     dev->nChunksPerBlock < 2 || 
++	     dev->nReservedBlocks < 2 || 
++	     dev->internalStartBlock <= 0 || 
++	     dev->internalEndBlock <= 0 || 
++	     dev->internalEndBlock <= (dev->internalStartBlock + dev->nReservedBlocks + 2)	// otherwise it is too small
++	    ) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("yaffs: NAND geometry problems: chunk size %d, type is yaffs%s "
++		    TENDSTR), dev->nDataBytesPerChunk, dev->isYaffs2 ? "2" : ""));
++		return YAFFS_FAIL;
++	}
++
++	if (yaffs_InitialiseNAND(dev) != YAFFS_OK) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR("yaffs: InitialiseNAND failed" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++
++	/* Got the right mix of functions? */
++	if (!yaffs_CheckDevFunctions(dev)) {
++		/* Function missing */
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR
++		   ("yaffs: device function(s) missing or wrong\n" TENDSTR)));
++
++		return YAFFS_FAIL;
++	}
++
++	/* This is really a compilation check. */
++	if (!yaffs_CheckStructures()) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR("yaffs_CheckStructures failed\n" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++
++	if (dev->isMounted) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR("yaffs: device already mounted\n" TENDSTR)));
++		return YAFFS_FAIL;
++	}
++
++	/* Finished with most checks. One or two more checks happen later on too. */
++
++	dev->isMounted = 1;
++
++
++
++	/* OK now calculate a few things for the device */
++	
++	/*
++	 *  Calculate all the chunk size manipulation numbers: 
++	 */
++	 /* Start off assuming it is a power of 2 */
++	 dev->chunkShift = ShiftDiv(dev->nDataBytesPerChunk);
++	 dev->chunkMask = (1<<dev->chunkShift) - 1;
++
++	 if(dev->nDataBytesPerChunk == (dev->chunkMask + 1)){
++	 	/* Yes it is a power of 2, disable crumbs */
++		dev->crumbMask = 0;
++		dev->crumbShift = 0;
++		dev->crumbsPerChunk = 0;
++	 } else {
++	 	/* Not a power of 2, use crumbs instead */
++		dev->crumbShift = ShiftDiv(sizeof(yaffs_PackedTags2TagsPart));
++		dev->crumbMask = (1<<dev->crumbShift)-1;
++		dev->crumbsPerChunk = dev->nDataBytesPerChunk/(1 << dev->crumbShift);
++		dev->chunkShift = 0;
++		dev->chunkMask = 0;
++	}
++	 	
++
++	/*
++	 * Calculate chunkGroupBits.
++	 * We need to find the next power of 2 > than internalEndBlock
++	 */
++
++	x = dev->nChunksPerBlock * (dev->internalEndBlock + 1);
++	
++	bits = ShiftsGE(x);
++	
++	/* Set up tnode width if wide tnodes are enabled. */
++	if(!dev->wideTnodesDisabled){
++		/* bits must be even so that we end up with 32-bit words */
++		if(bits & 1)
++			bits++;
++		if(bits < 16)
++			dev->tnodeWidth = 16;
++		else
++			dev->tnodeWidth = bits;
++	}
++	else
++		dev->tnodeWidth = 16;
++ 
++	dev->tnodeMask = (1<<dev->tnodeWidth)-1;
++		
++	/* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
++	 * so if the bitwidth of the
++	 * chunk range we're using is greater than 16 we need
++	 * to figure out chunk shift and chunkGroupSize
++	 */
++		 
++	if (bits <= dev->tnodeWidth)
++		dev->chunkGroupBits = 0;
++	else
++		dev->chunkGroupBits = bits - dev->tnodeWidth;
++		
++
++	dev->chunkGroupSize = 1 << dev->chunkGroupBits;
++
++	if (dev->nChunksPerBlock < dev->chunkGroupSize) {
++		/* We have a problem because the soft delete won't work if
++		 * the chunk group size > chunks per block.
++		 * This can be remedied by using larger "virtual blocks".
++		 */
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR("yaffs: chunk group too large\n" TENDSTR)));
++
++		return YAFFS_FAIL;
++	}
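(To make the tnode-width arithmetic above concrete, take a hypothetical 1GB part with 2048-byte chunks and 64 chunks per block, i.e. 8192 blocks, and assume ShiftsGE(x) returns the smallest n with (1 << n) >= x:

	x = 64 * 8192 = 524288 chunks               ->  bits = 19
	wide tnodes enabled : bits rounded up to 20 ->  tnodeWidth = 20, chunkGroupBits = 0, chunkGroupSize = 1
	wide tnodes disabled:                           tnodeWidth = 16, chunkGroupBits = 19 - 16 = 3, chunkGroupSize = 8

Since 8 <= nChunksPerBlock, the check above passes either way.)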
++
++	/* OK, we've finished verifying the device, let's continue with initialisation */

++
++	/* More device initialisation */
++	dev->garbageCollections = 0;
++	dev->passiveGarbageCollections = 0;
++	dev->currentDirtyChecker = 0;
++	dev->bufferedBlock = -1;
++	dev->doingBufferedBlockRewrite = 0;
++	dev->nDeletedFiles = 0;
++	dev->nBackgroundDeletions = 0;
++	dev->nUnlinkedFiles = 0;
++	dev->eccFixed = 0;
++	dev->eccUnfixed = 0;
++	dev->tagsEccFixed = 0;
++	dev->tagsEccUnfixed = 0;
++	dev->nErasureFailures = 0;
++	dev->nErasedBlocks = 0;
++	dev->isDoingGC = 0;
++	dev->hasPendingPrioritisedGCs = 1; /* Assume the worst for now, will get fixed on first GC */
++
++	/* Initialise temporary buffers and caches. */
++	if(!yaffs_InitialiseTempBuffers(dev))
++		init_failed = 1;
++	
++	dev->srCache = NULL;
++	dev->gcCleanupList = NULL;
++	
++	
++	if (!init_failed &&
++	    dev->nShortOpCaches > 0) {
++		int i;
++		__u8 *buf;
++		int srCacheBytes = dev->nShortOpCaches * sizeof(yaffs_ChunkCache);
++
++		if (dev->nShortOpCaches > YAFFS_MAX_SHORT_OP_CACHES) {
++			dev->nShortOpCaches = YAFFS_MAX_SHORT_OP_CACHES;
++		}
++
++		buf = dev->srCache =  YMALLOC(srCacheBytes);
++		    
++		if(dev->srCache)
++			memset(dev->srCache,0,srCacheBytes);
++		   
++		for (i = 0; i < dev->nShortOpCaches && buf; i++) {
++			dev->srCache[i].object = NULL;
++			dev->srCache[i].lastUse = 0;
++			dev->srCache[i].dirty = 0;
++			dev->srCache[i].data = buf = YMALLOC_DMA(dev->nDataBytesPerChunk);
++		}
++		if(!buf)
++			init_failed = 1;
++			
++		dev->srLastUse = 0;
++	}
++
++	dev->cacheHits = 0;
++	
++	if(!init_failed){
++		dev->gcCleanupList = YMALLOC(dev->nChunksPerBlock * sizeof(__u32));
++		if(!dev->gcCleanupList)
++			init_failed = 1;
++	}
++
++	if (dev->isYaffs2) {
++		dev->useHeaderFileSize = 1;
++	}
++	if(!init_failed && !yaffs_InitialiseBlocks(dev))
++		init_failed = 1;
++		
++	yaffs_InitialiseTnodes(dev);
++	yaffs_InitialiseObjects(dev);
++
++	if(!init_failed && !yaffs_CreateInitialDirectories(dev))
++		init_failed = 1;
++
++
++	if(!init_failed){
++		/* Now scan the flash. */
++		if (dev->isYaffs2) {
++			if(yaffs_CheckpointRestore(dev)) {
++				T(YAFFS_TRACE_ALWAYS,
++				  (TSTR("yaffs: restored from checkpoint" TENDSTR)));
++			} else {
++
++				/* Clean up the mess caused by an aborted checkpoint load 
++				 * and scan backwards. 
++				 */
++				yaffs_DeinitialiseBlocks(dev);
++				yaffs_DeinitialiseTnodes(dev);
++				yaffs_DeinitialiseObjects(dev);
++				
++			
++				dev->nErasedBlocks = 0;
++				dev->nFreeChunks = 0;
++				dev->allocationBlock = -1;
++				dev->allocationPage = -1;
++				dev->nDeletedFiles = 0;
++				dev->nUnlinkedFiles = 0;
++				dev->nBackgroundDeletions = 0;
++				dev->oldestDirtySequence = 0;
++
++				if(!init_failed && !yaffs_InitialiseBlocks(dev))
++					init_failed = 1;
++					
++				yaffs_InitialiseTnodes(dev);
++				yaffs_InitialiseObjects(dev);
++
++				if(!init_failed && !yaffs_CreateInitialDirectories(dev))
++					init_failed = 1;
++
++				if(!init_failed && !yaffs_ScanBackwards(dev))
++					init_failed = 1;
++			}
++		}else
++			if(!yaffs_Scan(dev))
++				init_failed = 1;
++	}
++		
++	if(init_failed){
++		/* Clean up the mess */
++		T(YAFFS_TRACE_TRACING,
++		  (TSTR("yaffs: yaffs_GutsInitialise() aborted.\n" TENDSTR)));
++
++		yaffs_Deinitialise(dev);
++		return YAFFS_FAIL;
++	}
++
++	/* Zero out stats */
++	dev->nPageReads = 0;
++	dev->nPageWrites = 0;
++	dev->nBlockErasures = 0;
++	dev->nGCCopies = 0;
++	dev->nRetriedWrites = 0;
++
++	dev->nRetiredBlocks = 0;
++
++	yaffs_VerifyFreeChunks(dev);
++	yaffs_VerifyBlocks(dev);
++	
++
++	T(YAFFS_TRACE_TRACING,
++	  (TSTR("yaffs: yaffs_GutsInitialise() done.\n" TENDSTR)));
++	return YAFFS_OK;
++
++}
++
++void yaffs_Deinitialise(yaffs_Device * dev)
++{
++	if (dev->isMounted) {
++		int i;
++
++		yaffs_DeinitialiseBlocks(dev);
++		yaffs_DeinitialiseTnodes(dev);
++		yaffs_DeinitialiseObjects(dev);
++		if (dev->nShortOpCaches > 0 &&
++		    dev->srCache) {
++
++			for (i = 0; i < dev->nShortOpCaches; i++) {
++				if(dev->srCache[i].data)
++					YFREE(dev->srCache[i].data);
++				dev->srCache[i].data = NULL;
++			}
++
++			YFREE(dev->srCache);
++			dev->srCache = NULL;
++		}
++
++		YFREE(dev->gcCleanupList);
++
++		for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++			YFREE(dev->tempBuffer[i].buffer);
++		}
++
++		dev->isMounted = 0;
++	}
++
++}
++
++static int yaffs_CountFreeChunks(yaffs_Device * dev)
++{
++	int nFree;
++	int b;
++
++	yaffs_BlockInfo *blk;
++
++	for (nFree = 0, b = dev->internalStartBlock; b <= dev->internalEndBlock;
++	     b++) {
++		blk = yaffs_GetBlockInfo(dev, b);
++
++		switch (blk->blockState) {
++		case YAFFS_BLOCK_STATE_EMPTY:
++		case YAFFS_BLOCK_STATE_ALLOCATING:
++		case YAFFS_BLOCK_STATE_COLLECTING:
++		case YAFFS_BLOCK_STATE_FULL:
++			nFree +=
++			    (dev->nChunksPerBlock - blk->pagesInUse +
++			     blk->softDeletions);
++			break;
++		default:
++			break;
++		}
++
++	}
++
++	return nFree;
++}
++
++int yaffs_GetNumberOfFreeChunks(yaffs_Device * dev)
++{
++	/* This is what we report to the outside world */
++
++	int nFree;
++	int nDirtyCacheChunks;
++	int blocksForCheckpoint;
++
++#if 1
++	nFree = dev->nFreeChunks;
++#else
++	nFree = yaffs_CountFreeChunks(dev);
++#endif
++
++	nFree += dev->nDeletedFiles;
++	
++	/* Now count the number of dirty chunks in the cache and subtract those */
++
++	{
++		int i;
++		for (nDirtyCacheChunks = 0, i = 0; i < dev->nShortOpCaches; i++) {
++			if (dev->srCache[i].dirty)
++				nDirtyCacheChunks++;
++		}
++	}
++
++	nFree -= nDirtyCacheChunks;
++
++	nFree -= ((dev->nReservedBlocks + 1) * dev->nChunksPerBlock);
++	
++	/* Now we figure out how much to reserve for the checkpoint and report that... */
++	blocksForCheckpoint = dev->nCheckpointReservedBlocks - dev->blocksInCheckpoint;
++	if(blocksForCheckpoint < 0)
++		blocksForCheckpoint = 0;
++		
++	nFree -= (blocksForCheckpoint * dev->nChunksPerBlock);
++
++	if (nFree < 0)
++		nFree = 0;
++
++	return nFree;
++
++}
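(A worked example of the reservation arithmetic above, using purely hypothetical numbers: nChunksPerBlock = 64, nFreeChunks = 10000, nDeletedFiles = 3, 2 dirty cache chunks, nReservedBlocks = 5, nCheckpointReservedBlocks = 10, blocksInCheckpoint = 4:

	nFree = 10000 + 3 - 2
	        - (5 + 1) * 64      block reserve
	        - (10 - 4) * 64     checkpoint reserve
	      = 10001 - 384 - 384 = 9233 chunks reported)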
++
++static int yaffs_freeVerificationFailures;
++
++static void yaffs_VerifyFreeChunks(yaffs_Device * dev)
++{
++	int counted;
++	int difference;
++	
++	if(yaffs_SkipVerification(dev))
++		return;
++	
++	counted = yaffs_CountFreeChunks(dev);
++
++	difference = dev->nFreeChunks - counted;
++
++	if (difference) {
++		T(YAFFS_TRACE_ALWAYS,
++		  (TSTR("Freechunks verification failure %d %d %d" TENDSTR),
++		   dev->nFreeChunks, counted, difference));
++		yaffs_freeVerificationFailures++;
++	}
++}
++
++/*---------------------------------------- YAFFS test code ----------------------*/
++
++#define yaffs_CheckStruct(structure,syze, name) \
++           if(sizeof(structure) != syze) \
++	       { \
++	         T(YAFFS_TRACE_ALWAYS,(TSTR("%s should be %d but is %d\n" TENDSTR),\
++		 name,syze,sizeof(structure))); \
++	         return YAFFS_FAIL; \
++		}
++
++static int yaffs_CheckStructures(void)
++{
++/*      yaffs_CheckStruct(yaffs_Tags,8,"yaffs_Tags") */
++/*      yaffs_CheckStruct(yaffs_TagsUnion,8,"yaffs_TagsUnion") */
++/*      yaffs_CheckStruct(yaffs_Spare,16,"yaffs_Spare") */
++#ifndef CONFIG_YAFFS_TNODE_LIST_DEBUG
++	yaffs_CheckStruct(yaffs_Tnode, 2 * YAFFS_NTNODES_LEVEL0, "yaffs_Tnode")
++#endif
++	    yaffs_CheckStruct(yaffs_ObjectHeader, 512, "yaffs_ObjectHeader")
++
++	    return YAFFS_OK;
++}
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_guts.h linux-2.6.21.1.new/fs/yaffs2/yaffs_guts.h
+--- linux-2.6.21.1/fs/yaffs2/yaffs_guts.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_guts.h	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,902 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. 
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_GUTS_H__
++#define __YAFFS_GUTS_H__
++
++#include "devextras.h"
++#include "yportenv.h"
++
++#define YAFFS_OK	1
++#define YAFFS_FAIL  0
++
++/* Give us a  Y=0x59, 
++ * Give us an A=0x41, 
++ * Give us an FF=0xFF 
++ * Give us an S=0x53
++ * And what have we got... 
++ */
++#define YAFFS_MAGIC			0x5941FF53
++
++#define YAFFS_NTNODES_LEVEL0	  	16
++#define YAFFS_TNODES_LEVEL0_BITS	4
++#define YAFFS_TNODES_LEVEL0_MASK	0xf
++
++#define YAFFS_NTNODES_INTERNAL 		(YAFFS_NTNODES_LEVEL0 / 2)
++#define YAFFS_TNODES_INTERNAL_BITS 	(YAFFS_TNODES_LEVEL0_BITS - 1)
++#define YAFFS_TNODES_INTERNAL_MASK	0x7
++#define YAFFS_TNODES_MAX_LEVEL		6
++
++#ifndef CONFIG_YAFFS_NO_YAFFS1
++#define YAFFS_BYTES_PER_SPARE		16
++#define YAFFS_BYTES_PER_CHUNK		512
++#define YAFFS_CHUNK_SIZE_SHIFT		9
++#define YAFFS_CHUNKS_PER_BLOCK		32
++#define YAFFS_BYTES_PER_BLOCK		(YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
++#endif
++
++#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 	1024
++#define YAFFS_MIN_YAFFS2_SPARE_SIZE	32
++
++#define YAFFS_MAX_CHUNK_ID		0x000FFFFF
++
++#define YAFFS_UNUSED_OBJECT_ID		0x0003FFFF
++
++#define YAFFS_ALLOCATION_NOBJECTS	100
++#define YAFFS_ALLOCATION_NTNODES	100
++#define YAFFS_ALLOCATION_NLINKS		100
++
++#define YAFFS_NOBJECT_BUCKETS		256
++
++
++#define YAFFS_OBJECT_SPACE		0x40000
++
++#define YAFFS_CHECKPOINT_VERSION 	3
++
++#ifdef CONFIG_YAFFS_UNICODE
++#define YAFFS_MAX_NAME_LENGTH		127
++#define YAFFS_MAX_ALIAS_LENGTH		79
++#else
++#define YAFFS_MAX_NAME_LENGTH		255
++#define YAFFS_MAX_ALIAS_LENGTH		159
++#endif
++
++#define YAFFS_SHORT_NAME_LENGTH		15
++
++/* Some special object ids for pseudo objects */
++#define YAFFS_OBJECTID_ROOT		1
++#define YAFFS_OBJECTID_LOSTNFOUND	2
++#define YAFFS_OBJECTID_UNLINKED		3
++#define YAFFS_OBJECTID_DELETED		4
++
++/* Pseudo object ids for checkpointing */
++#define YAFFS_OBJECTID_SB_HEADER	0x10
++#define YAFFS_OBJECTID_CHECKPOINT_DATA	0x20
++#define YAFFS_SEQUENCE_CHECKPOINT_DATA  0x21
++
++/* */
++
++#define YAFFS_MAX_SHORT_OP_CACHES	20
++
++#define YAFFS_N_TEMP_BUFFERS		4
++
++/* We limit the number of attempts at successfully saving a chunk of data.
++ * Small-page devices have 32 pages per block; large-page devices have 64.
++ * Default to something in the order of 5 to 10 blocks worth of chunks.
++ */
++#define YAFFS_WR_ATTEMPTS		(5*64)
++
++/* Sequence numbers are used in YAFFS2 to determine block allocation order.
++ * The range is limited slightly to help distinguish bad numbers from good.
++ * This also allows us to perhaps in the future use special numbers for
++ * special purposes.
++ * EFFFFF00 allows the allocation of 8 blocks per second (~1Mbytes) for 15 years, 
++ * which is longer than the expected lifetime of a 2GB device.
++ */
++#define YAFFS_LOWEST_SEQUENCE_NUMBER	0x00001000
++#define YAFFS_HIGHEST_SEQUENCE_NUMBER	0xEFFFFF00
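(A quick sanity check on that figure: 8 blocks per second for 15 years is roughly 8 * 60 * 60 * 24 * 365 * 15 = 3.78e9 sequence numbers, which stays below YAFFS_HIGHEST_SEQUENCE_NUMBER = 0xEFFFFF00, about 4.03e9; with 128KB blocks, 8 blocks per second also matches the quoted ~1Mbyte per second.)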
++
++/* ChunkCache is used for short read/write operations.*/
++typedef struct {
++	struct yaffs_ObjectStruct *object;
++	int chunkId;
++	int lastUse;
++	int dirty;
++	int nBytes;		/* Only valid if the cache is dirty */
++	int locked;		/* Can't push out or flush while locked. */
++#ifdef CONFIG_YAFFS_YAFFS2
++	__u8 *data;
++#else
++	__u8 data[YAFFS_BYTES_PER_CHUNK];
++#endif
++} yaffs_ChunkCache;
++
++
++
++/* Tags structures in RAM
++ * NB This uses bitfields. Bitfields should not straddle a u32 boundary otherwise
++ * the structure size will get blown out.
++ */
++
++#ifndef CONFIG_YAFFS_NO_YAFFS1
++typedef struct {
++	unsigned chunkId:20;
++	unsigned serialNumber:2;
++	unsigned byteCount:10;
++	unsigned objectId:18;
++	unsigned ecc:12;
++	unsigned unusedStuff:2;
++
++} yaffs_Tags;
++
++typedef union {
++	yaffs_Tags asTags;
++	__u8 asBytes[8];
++} yaffs_TagsUnion;
++
++#endif
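(For the bitfield note above, the packing works out exactly:

	chunkId(20) + serialNumber(2) + byteCount(10) = 32 bits
	objectId(18) + ecc(12) + unusedStuff(2)       = 32 bits

so no field straddles a __u32 boundary and sizeof(yaffs_Tags) stays at the 8 bytes mirrored by asBytes[8] in yaffs_TagsUnion.)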
++
++/* Stuff used for extended tags in YAFFS2 */
++
++typedef enum {
++	YAFFS_ECC_RESULT_UNKNOWN,
++	YAFFS_ECC_RESULT_NO_ERROR,
++	YAFFS_ECC_RESULT_FIXED,
++	YAFFS_ECC_RESULT_UNFIXED
++} yaffs_ECCResult;
++
++typedef enum {
++	YAFFS_OBJECT_TYPE_UNKNOWN,
++	YAFFS_OBJECT_TYPE_FILE,
++	YAFFS_OBJECT_TYPE_SYMLINK,
++	YAFFS_OBJECT_TYPE_DIRECTORY,
++	YAFFS_OBJECT_TYPE_HARDLINK,
++	YAFFS_OBJECT_TYPE_SPECIAL
++} yaffs_ObjectType;
++
++#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
++
++typedef struct {
++
++	unsigned validMarker0;
++	unsigned chunkUsed;	/*  Status of the chunk: used or unused */
++	unsigned objectId;	/* If 0 then this is not part of an object (unused) */
++	unsigned chunkId;	/* If 0 then this is a header, else a data chunk */
++	unsigned byteCount;	/* Only valid for data chunks */
++
++	/* The following stuff only has meaning when we read */
++	yaffs_ECCResult eccResult;
++	unsigned blockBad;	
++
++	/* YAFFS 1 stuff */
++	unsigned chunkDeleted;	/* The chunk is marked deleted */
++	unsigned serialNumber;	/* Yaffs1 2-bit serial number */
++
++	/* YAFFS2 stuff */
++	unsigned sequenceNumber;	/* The sequence number of this block */
++
++	/* Extra info if this is an object header (YAFFS2 only) */
++
++	unsigned extraHeaderInfoAvailable;	/* There is extra info available if this is not zero */
++	unsigned extraParentObjectId;	/* The parent object */
++	unsigned extraIsShrinkHeader;	/* Is it a shrink header? */
++	unsigned extraShadows;		/* Does this shadow another object? */
++
++	yaffs_ObjectType extraObjectType;	/* What object type? */
++
++	unsigned extraFileLength;		/* Length if it is a file */
++	unsigned extraEquivalentObjectId;	/* Equivalent object Id if it is a hard link */
++
++	unsigned validMarker1;
++
++} yaffs_ExtendedTags;
++
++/* Spare structure for YAFFS1 */
++typedef struct {
++	__u8 tagByte0;
++	__u8 tagByte1;
++	__u8 tagByte2;
++	__u8 tagByte3;
++	__u8 pageStatus;	/* set to 0 to delete the chunk */
++	__u8 blockStatus;
++	__u8 tagByte4;
++	__u8 tagByte5;
++	__u8 ecc1[3];
++	__u8 tagByte6;
++	__u8 tagByte7;
++	__u8 ecc2[3];
++} yaffs_Spare;
++
++/*Special structure for passing through to mtd */
++struct yaffs_NANDSpare {
++	yaffs_Spare spare;
++	int eccres1;
++	int eccres2;
++};
++
++/* Block data in RAM */
++
++typedef enum {
++	YAFFS_BLOCK_STATE_UNKNOWN = 0,
++
++	YAFFS_BLOCK_STATE_SCANNING,
++	YAFFS_BLOCK_STATE_NEEDS_SCANNING,
++	/* The block might have something on it (ie it is allocating or full, perhaps empty)
++	 * but it needs to be scanned to determine its true state.
++	 * This state is only valid during yaffs_Scan.
++	 * NB We tolerate empty because the pre-scanner might be incapable of deciding
++	 * However, if this state is returned on a YAFFS2 device, then we expect a sequence number
++	 */
++
++	YAFFS_BLOCK_STATE_EMPTY,
++	/* This block is empty */
++
++	YAFFS_BLOCK_STATE_ALLOCATING,
++	/* This block is partially allocated. 
++	 * At least one page holds valid data.
++	 * This is the one currently being used for page
++	 * allocation. Should never be more than one of these
++	 */
++
++	YAFFS_BLOCK_STATE_FULL,	
++	/* All the pages in this block have been allocated.
++	 */
++
++	YAFFS_BLOCK_STATE_DIRTY,
++	/* All pages have been allocated and deleted. 
++	 * Erase me, reuse me.
++	 */
++
++	YAFFS_BLOCK_STATE_CHECKPOINT,	
++	/* This block is assigned to holding checkpoint data.
++	 */
++
++	YAFFS_BLOCK_STATE_COLLECTING,	
++	/* This block is being garbage collected */
++
++	YAFFS_BLOCK_STATE_DEAD	
++	/* This block has failed and is not in use */
++} yaffs_BlockState;
++
++#define	YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
++
++
++typedef struct {
++
++	int softDeletions:10;	/* number of soft deleted pages */
++	int pagesInUse:10;	/* number of pages in use */
++	yaffs_BlockState blockState:4;	/* One of the above block states */
++	__u32 needsRetiring:1;	/* Data has failed on this block, need to get valid data off */
++                        	/* and retire the block. */
++	__u32 skipErasedCheck: 1; /* If this is set we can skip the erased check on this block */
++	__u32 gcPrioritise: 1; 	/* An ECC check or blank check has failed on this block. 
++				   It should be prioritised for GC */
++        __u32 chunkErrorStrikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */
++
++#ifdef CONFIG_YAFFS_YAFFS2
++	__u32 hasShrinkHeader:1; /* This block has at least one shrink object header */
++	__u32 sequenceNumber;	 /* block sequence number for yaffs2 */
++#endif
++
++} yaffs_BlockInfo;
++
++/* -------------------------- Object structure -------------------------------*/
++/* This is the object structure as stored on NAND */
++
++typedef struct {
++	yaffs_ObjectType type;
++
++	/* Apply to everything  */
++	int parentObjectId;
++	__u16 sum__NoLongerUsed;	/* checksum of name. No longer used */
++	YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
++
++	/* The following apply to directories, files, symlinks - not hard links */
++	__u32 yst_mode;		/* protection */
++
++#ifdef CONFIG_YAFFS_WINCE
++	__u32 notForWinCE[5];
++#else
++	__u32 yst_uid;
++	__u32 yst_gid;
++	__u32 yst_atime;
++	__u32 yst_mtime;
++	__u32 yst_ctime;
++#endif
++
++	/* File size  applies to files only */
++	int fileSize;
++
++	/* Equivalent object id applies to hard links only. */
++	int equivalentObjectId;
++
++	/* Alias is for symlinks only. */
++	YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
++
++	__u32 yst_rdev;		/* device stuff for block and char devices (major/min) */
++
++#ifdef CONFIG_YAFFS_WINCE
++	__u32 win_ctime[2];
++	__u32 win_atime[2];
++	__u32 win_mtime[2];
++	__u32 roomToGrow[4];
++#else
++	__u32 roomToGrow[10];
++#endif
++
++	int shadowsObject;	/* This object header shadows the specified object if > 0 */
++
++	/* isShrink applies to object headers written when we shrink the file (ie resize) */
++	__u32 isShrink;
++
++} yaffs_ObjectHeader;
++
++/*--------------------------- Tnode -------------------------- */
++
++union yaffs_Tnode_union {
++#ifdef CONFIG_YAFFS_TNODE_LIST_DEBUG
++	union yaffs_Tnode_union *internal[YAFFS_NTNODES_INTERNAL + 1];
++#else
++	union yaffs_Tnode_union *internal[YAFFS_NTNODES_INTERNAL];
++#endif
++/*	__u16 level0[YAFFS_NTNODES_LEVEL0]; */
++
++};
++
++typedef union yaffs_Tnode_union yaffs_Tnode;
++
++struct yaffs_TnodeList_struct {
++	struct yaffs_TnodeList_struct *next;
++	yaffs_Tnode *tnodes;
++};
++
++typedef struct yaffs_TnodeList_struct yaffs_TnodeList;
++
++/*------------------------  Object -----------------------------*/
++/* An object can be one of:
++ * - a directory (no data, has children links).
++ * - a regular file (data.... not prunes :->).
++ * - a symlink [symbolic link] (the alias).
++ * - a hard link
++ */
++
++typedef struct {
++	__u32 fileSize;
++	__u32 scannedFileSize;
++	__u32 shrinkSize;
++	int topLevel;
++	yaffs_Tnode *top;
++} yaffs_FileStructure;
++
++typedef struct {
++	struct list_head children;	/* list of child links */
++} yaffs_DirectoryStructure;
++
++typedef struct {
++	YCHAR *alias;
++} yaffs_SymLinkStructure;
++
++typedef struct {
++	struct yaffs_ObjectStruct *equivalentObject;
++	__u32 equivalentObjectId;
++} yaffs_HardLinkStructure;
++
++typedef union {
++	yaffs_FileStructure fileVariant;
++	yaffs_DirectoryStructure directoryVariant;
++	yaffs_SymLinkStructure symLinkVariant;
++	yaffs_HardLinkStructure hardLinkVariant;
++} yaffs_ObjectVariant;
++
++struct yaffs_ObjectStruct {
++	__u8 deleted:1;		/* This should only apply to unlinked files. */
++	__u8 softDeleted:1;	/* it has also been soft deleted */
++	__u8 unlinked:1;	/* An unlinked file. The file should be in the unlinked directory.*/
++	__u8 fake:1;		/* A fake object has no presence on NAND. */
++	__u8 renameAllowed:1;	/* Some objects are not allowed to be renamed. */
++	__u8 unlinkAllowed:1;
++	__u8 dirty:1;		/* the object needs to be written to flash */
++	__u8 valid:1;		/* When the file system is being loaded up, this 
++				 * object might be created before the data
++				 * is available (ie. file data records appear before the header).
++				 */
++	__u8 lazyLoaded:1;	/* This object has been lazy loaded and is missing some detail */
++
++	__u8 deferedFree:1;	/* For Linux kernel. Object is removed from NAND, but is
++				 * still in the inode cache. Freeing of the object is deferred
++				 * until the inode is released.
++				 */
++
++	__u8 serial;		/* serial number of chunk in NAND. Cached here */
++	__u16 sum;		/* sum of the name to speed searching */
++
++	struct yaffs_DeviceStruct *myDev;	/* The device I'm on */
++
++	struct list_head hashLink;	/* list of objects in this hash bucket */
++
++	struct list_head hardLinks;	/* all the equivalent hard linked objects */
++
++	/* directory structure stuff */
++	/* also used for linking up the free list */
++	struct yaffs_ObjectStruct *parent; 
++	struct list_head siblings;
++
++	/* Where's my object header in NAND? */
++	int chunkId;		
++
++	int nDataChunks;	/* Number of data chunks attached to the file. */
++
++	__u32 objectId;		/* the object id value */
++
++	__u32 yst_mode;
++
++#ifdef CONFIG_YAFFS_SHORT_NAMES_IN_RAM
++	YCHAR shortName[YAFFS_SHORT_NAME_LENGTH + 1];
++#endif
++
++#ifndef __KERNEL__
++	__u32 inUse;
++#endif
++
++#ifdef CONFIG_YAFFS_WINCE
++	__u32 win_ctime[2];
++	__u32 win_mtime[2];
++	__u32 win_atime[2];
++#else
++	__u32 yst_uid;
++	__u32 yst_gid;
++	__u32 yst_atime;
++	__u32 yst_mtime;
++	__u32 yst_ctime;
++#endif
++
++	__u32 yst_rdev;
++
++#ifdef __KERNEL__
++	struct inode *myInode;
++
++#endif
++
++	yaffs_ObjectType variantType;
++
++	yaffs_ObjectVariant variant;
++
++};
++
++typedef struct yaffs_ObjectStruct yaffs_Object;
++
++struct yaffs_ObjectList_struct {
++	yaffs_Object *objects;
++	struct yaffs_ObjectList_struct *next;
++};
++
++typedef struct yaffs_ObjectList_struct yaffs_ObjectList;
++
++typedef struct {
++	struct list_head list;
++	int count;
++} yaffs_ObjectBucket;
++
++
++/* yaffs_CheckpointObject holds the definition of an object as dumped 
++ * by checkpointing.
++ */
++
++typedef struct {
++        int structType;
++	__u32 objectId;		
++	__u32 parentId;
++	int chunkId;
++			
++	yaffs_ObjectType variantType:3;
++	__u8 deleted:1;		
++	__u8 softDeleted:1;	
++	__u8 unlinked:1;	
++	__u8 fake:1;		
++	__u8 renameAllowed:1;
++	__u8 unlinkAllowed:1;
++	__u8 serial;		
++	
++	int nDataChunks;	
++	__u32 fileSizeOrEquivalentObjectId;
++
++}yaffs_CheckpointObject;
++
++/*--------------------- Temporary buffers ----------------
++ *
++ * These are chunk-sized working buffers. Each device has a few
++ */
++
++typedef struct {
++	__u8 *buffer;
++	int line;	/* track from whence this buffer was allocated */
++	int maxLine;
++} yaffs_TempBuffer;
++
++/*----------------- Device ---------------------------------*/
++
++struct yaffs_DeviceStruct {
++	struct list_head devList;
++	const char *name;
++
++	/* Entry parameters set up way early. Yaffs sets up the rest.*/
++	int nDataBytesPerChunk;	/* Should be a power of 2 >= 512 */
++	int nChunksPerBlock;	/* does not need to be a power of 2 */
++	int nBytesPerSpare;	/* spare area size */
++	int startBlock;		/* Start block we're allowed to use */
++	int endBlock;		/* End block we're allowed to use */
++	int nReservedBlocks;	/* We want this tuneable so that we can reduce */
++				/* reserved blocks on NOR and RAM. */
++	
++	
++	/* Stuff used by the shared space checkpointing mechanism */
++	/* If this value is zero, then this mechanism is disabled */
++	
++	int nCheckpointReservedBlocks; /* Blocks to reserve for checkpoint data */
++
++	
++
++
++	int nShortOpCaches;	/* If <= 0, then short op caching is disabled, else
++				 * the number of short op caches (don't use too many)
++				 */
++
++	int useHeaderFileSize;	/* Flag to determine if we should use file sizes from the header */
++
++	int useNANDECC;		/* Flag to decide whether or not to use NANDECC */
++
++	void *genericDevice;	/* Pointer to device context
++				 * On an mtd this holds the mtd pointer.
++				 */
++        void *superBlock;
++        
++	/* NAND access functions (Must be set before calling YAFFS)*/
++
++	int (*writeChunkToNAND) (struct yaffs_DeviceStruct * dev,
++				 int chunkInNAND, const __u8 * data,
++				 const yaffs_Spare * spare);
++	int (*readChunkFromNAND) (struct yaffs_DeviceStruct * dev,
++				  int chunkInNAND, __u8 * data,
++				  yaffs_Spare * spare);
++	int (*eraseBlockInNAND) (struct yaffs_DeviceStruct * dev,
++				 int blockInNAND);
++	int (*initialiseNAND) (struct yaffs_DeviceStruct * dev);
++
++#ifdef CONFIG_YAFFS_YAFFS2
++	int (*writeChunkWithTagsToNAND) (struct yaffs_DeviceStruct * dev,
++					 int chunkInNAND, const __u8 * data,
++					 const yaffs_ExtendedTags * tags);
++	int (*readChunkWithTagsFromNAND) (struct yaffs_DeviceStruct * dev,
++					  int chunkInNAND, __u8 * data,
++					  yaffs_ExtendedTags * tags);
++	int (*markNANDBlockBad) (struct yaffs_DeviceStruct * dev, int blockNo);
++	int (*queryNANDBlock) (struct yaffs_DeviceStruct * dev, int blockNo,
++			       yaffs_BlockState * state, int *sequenceNumber);
++#endif
++
++	int isYaffs2;
++	
++	/* The removeObjectCallback function must be supplied by OS flavours that 
++	 * need it. The Linux kernel does not use this, but yaffs direct does use
++	 * it to implement the faster readdir
++	 */
++	void (*removeObjectCallback)(struct yaffs_ObjectStruct *obj);
++	
++	/* Callback to mark the superblock dirty */
++	void (*markSuperBlockDirty)(void * superblock);
++	
++	int wideTnodesDisabled; /* Set to disable wide tnodes */
++	
++
++	/* End of stuff that must be set before initialisation. */
++	
++	/* Checkpoint control. Can be set before or after initialisation */
++	__u8 skipCheckpointRead;
++	__u8 skipCheckpointWrite;
++
++	/* Runtime parameters. Set up by YAFFS. */
++
++	__u16 chunkGroupBits;	/* 0 for devices <= 32MB. else log2(nchunks) - 16 */
++	__u16 chunkGroupSize;	/* == 2^chunkGroupBits */
++	
++	/* Stuff to support wide tnodes */
++	__u32 tnodeWidth;
++	__u32 tnodeMask;
++	
++	/* Stuff to support various file offsets to chunk/offset translations */
++	/* "Crumbs" for nDataBytesPerChunk not being a power of 2 */
++	__u32 crumbMask;
++	__u32 crumbShift;
++	__u32 crumbsPerChunk;
++	
++	/* Straight shifting for nDataBytesPerChunk being a power of 2 */
++	__u32 chunkShift;
++	__u32 chunkMask;
++	
++
++#ifdef __KERNEL__
++
++	struct semaphore sem;	/* Semaphore for waiting on erasure.*/
++	struct semaphore grossLock;	/* Gross locking semaphore */
++	__u8 *spareBuffer;	/* For mtdif2 use. Don't know the size of the buffer 
++				 * at compile time so we have to allocate it.
++				 */
++	void (*putSuperFunc) (struct super_block * sb);
++#endif
++
++	int isMounted;
++	
++	int isCheckpointed;
++
++
++	/* Stuff to support block offsetting to support start block zero */
++	int internalStartBlock;
++	int internalEndBlock;
++	int blockOffset;
++	int chunkOffset;
++	
++
++	/* Runtime checkpointing stuff */
++	int checkpointPageSequence;   /* running sequence number of checkpoint pages */
++	int checkpointByteCount;
++	int checkpointByteOffset;
++	__u8 *checkpointBuffer;
++	int checkpointOpenForWrite;
++	int blocksInCheckpoint;
++	int checkpointCurrentChunk;
++	int checkpointCurrentBlock;
++	int checkpointNextBlock;
++	int *checkpointBlockList;
++	int checkpointMaxBlocks;
++	__u32 checkpointSum;
++	__u32 checkpointXor;
++	
++	/* Block Info */
++	yaffs_BlockInfo *blockInfo;
++	__u8 *chunkBits;	/* bitmap of chunks in use */
++	unsigned blockInfoAlt:1;	/* was allocated using alternative strategy */
++	unsigned chunkBitsAlt:1;	/* was allocated using alternative strategy */
++	int chunkBitmapStride;	/* Number of bytes of chunkBits per block. 
++				 * Must be consistent with nChunksPerBlock.
++				 */
++
++	int nErasedBlocks;
++	int allocationBlock;	/* Current block being allocated off */
++	__u32 allocationPage;
++	int allocationBlockFinder;	/* Used to search for next allocation block */
++
++	/* Runtime state */
++	int nTnodesCreated;
++	yaffs_Tnode *freeTnodes;
++	int nFreeTnodes;
++	yaffs_TnodeList *allocatedTnodeList;
++
++	int isDoingGC;
++
++	int nObjectsCreated;
++	yaffs_Object *freeObjects;
++	int nFreeObjects;
++
++	yaffs_ObjectList *allocatedObjectList;
++
++	yaffs_ObjectBucket objectBucket[YAFFS_NOBJECT_BUCKETS];
++
++	int nFreeChunks;
++
++	int currentDirtyChecker;	/* Used to find current dirtiest block */
++
++	__u32 *gcCleanupList;	/* objects to delete at the end of a GC. */
++	int nonAggressiveSkip;	/* GC state/mode */
++
++	/* Statistics */
++	int nPageWrites;
++	int nPageReads;
++	int nBlockErasures;
++	int nErasureFailures;
++	int nGCCopies;
++	int garbageCollections;
++	int passiveGarbageCollections;
++	int nRetriedWrites;
++	int nRetiredBlocks;
++	int eccFixed;
++	int eccUnfixed;
++	int tagsEccFixed;
++	int tagsEccUnfixed;
++	int nDeletions;
++	int nUnmarkedDeletions;
++	
++	int hasPendingPrioritisedGCs; /* We think this device might have pending prioritised gcs */
++
++	/* Special directories */
++	yaffs_Object *rootDir;
++	yaffs_Object *lostNFoundDir;
++
++	/* Buffer areas for storing data to recover from write failures TODO
++	 *      __u8            bufferedData[YAFFS_CHUNKS_PER_BLOCK][YAFFS_BYTES_PER_CHUNK];
++	 *      yaffs_Spare bufferedSpare[YAFFS_CHUNKS_PER_BLOCK];
++	 */
++	
++	int bufferedBlock;	/* Which block is buffered here? */
++	int doingBufferedBlockRewrite;
++
++	yaffs_ChunkCache *srCache;
++	int srLastUse;
++
++	int cacheHits;
++
++	/* Stuff for background deletion and unlinked files.*/
++	yaffs_Object *unlinkedDir;	/* Directory where unlinked and deleted files live. */
++	yaffs_Object *deletedDir;	/* Directory where deleted objects are sent to disappear. */
++	yaffs_Object *unlinkedDeletion;	/* Current file being background deleted.*/
++	int nDeletedFiles;		/* Count of files awaiting deletion;*/
++	int nUnlinkedFiles;		/* Count of unlinked files. */
++	int nBackgroundDeletions;	/* Count of background deletions. */
++
++
++	yaffs_TempBuffer tempBuffer[YAFFS_N_TEMP_BUFFERS];
++	int maxTemp;
++	int unmanagedTempAllocations;
++	int unmanagedTempDeallocations;
++
++	/* yaffs2 runtime stuff */
++	unsigned sequenceNumber;	/* Sequence number of currently allocating block */
++	unsigned oldestDirtySequence;
++
++};
++
++typedef struct yaffs_DeviceStruct yaffs_Device;
++
++/* The static layout of block usage etc is stored in the super block header */
++typedef struct {
++        int StructType;
++	int version;
++	int checkpointStartBlock;
++	int checkpointEndBlock;
++	int startBlock;
++	int endBlock;
++	int rfu[100];
++} yaffs_SuperBlockHeader;
++	
++/* The CheckpointDevice structure holds the device information that changes at runtime and
++ * must be preserved over unmount/mount cycles.
++ */
++typedef struct {
++        int structType;
++	int nErasedBlocks;
++	int allocationBlock;	/* Current block being allocated off */
++	__u32 allocationPage;
++	int nFreeChunks;
++
++	int nDeletedFiles;		/* Count of files awaiting deletion;*/
++	int nUnlinkedFiles;		/* Count of unlinked files. */
++	int nBackgroundDeletions;	/* Count of background deletions. */
++
++	/* yaffs2 runtime stuff */
++	unsigned sequenceNumber;	/* Sequence number of currently allocating block */
++	unsigned oldestDirtySequence;
++
++} yaffs_CheckpointDevice;
++
++
++typedef struct {
++    int structType;
++    __u32 magic;
++    __u32 version;
++    __u32 head;
++} yaffs_CheckpointValidity;
++
++/* Function to manipulate block info */
++static Y_INLINE yaffs_BlockInfo *yaffs_GetBlockInfo(yaffs_Device * dev, int blk)
++{
++	if (blk < dev->internalStartBlock || blk > dev->internalEndBlock) {
++		T(YAFFS_TRACE_ERROR,
++		  (TSTR
++		   ("**>> yaffs: getBlockInfo block %d is not valid" TENDSTR),
++		   blk));
++		YBUG();
++	}
++	return &dev->blockInfo[blk - dev->internalStartBlock];
++}
++
++/*----------------------- YAFFS Functions -----------------------*/
++
++int yaffs_GutsInitialise(yaffs_Device * dev);
++void yaffs_Deinitialise(yaffs_Device * dev);
++
++int yaffs_GetNumberOfFreeChunks(yaffs_Device * dev);
++
++int yaffs_RenameObject(yaffs_Object * oldDir, const YCHAR * oldName,
++		       yaffs_Object * newDir, const YCHAR * newName);
++
++int yaffs_Unlink(yaffs_Object * dir, const YCHAR * name);
++int yaffs_DeleteFile(yaffs_Object * obj);
++
++int yaffs_GetObjectName(yaffs_Object * obj, YCHAR * name, int buffSize);
++int yaffs_GetObjectFileLength(yaffs_Object * obj);
++int yaffs_GetObjectInode(yaffs_Object * obj);
++unsigned yaffs_GetObjectType(yaffs_Object * obj);
++int yaffs_GetObjectLinkCount(yaffs_Object * obj);
++
++int yaffs_SetAttributes(yaffs_Object * obj, struct iattr *attr);
++int yaffs_GetAttributes(yaffs_Object * obj, struct iattr *attr);
++
++/* File operations */
++int yaffs_ReadDataFromFile(yaffs_Object * obj, __u8 * buffer, loff_t offset,
++			   int nBytes);
++int yaffs_WriteDataToFile(yaffs_Object * obj, const __u8 * buffer, loff_t offset,
++			  int nBytes, int writeThrough);
++int yaffs_ResizeFile(yaffs_Object * obj, loff_t newSize);
++
++yaffs_Object *yaffs_MknodFile(yaffs_Object * parent, const YCHAR * name,
++			      __u32 mode, __u32 uid, __u32 gid);
++int yaffs_FlushFile(yaffs_Object * obj, int updateTime);
++
++/* Flushing and checkpointing */
++void yaffs_FlushEntireDeviceCache(yaffs_Device *dev);
++
++int yaffs_CheckpointSave(yaffs_Device *dev);
++int yaffs_CheckpointRestore(yaffs_Device *dev);
++
++/* Directory operations */
++yaffs_Object *yaffs_MknodDirectory(yaffs_Object * parent, const YCHAR * name,
++				   __u32 mode, __u32 uid, __u32 gid);
++yaffs_Object *yaffs_FindObjectByName(yaffs_Object * theDir, const YCHAR * name);
++int yaffs_ApplyToDirectoryChildren(yaffs_Object * theDir,
++				   int (*fn) (yaffs_Object *));
++
++yaffs_Object *yaffs_FindObjectByNumber(yaffs_Device * dev, __u32 number);
++
++/* Link operations */
++yaffs_Object *yaffs_Link(yaffs_Object * parent, const YCHAR * name,
++			 yaffs_Object * equivalentObject);
++
++yaffs_Object *yaffs_GetEquivalentObject(yaffs_Object * obj);
++
++/* Symlink operations */
++yaffs_Object *yaffs_MknodSymLink(yaffs_Object * parent, const YCHAR * name,
++				 __u32 mode, __u32 uid, __u32 gid,
++				 const YCHAR * alias);
++YCHAR *yaffs_GetSymlinkAlias(yaffs_Object * obj);
++
++/* Special inodes (fifos, sockets and devices) */
++yaffs_Object *yaffs_MknodSpecial(yaffs_Object * parent, const YCHAR * name,
++				 __u32 mode, __u32 uid, __u32 gid, __u32 rdev);
++
++/* Special directories */
++yaffs_Object *yaffs_Root(yaffs_Device * dev);
++yaffs_Object *yaffs_LostNFound(yaffs_Device * dev);
++
++#ifdef CONFIG_YAFFS_WINCE
++/* CONFIG_YAFFS_WINCE special stuff */
++void yfsd_WinFileTimeNow(__u32 target[2]);
++#endif
++
++#ifdef __KERNEL__
++
++void yaffs_HandleDeferedFree(yaffs_Object * obj);
++#endif
++
++/* Debug dump  */
++int yaffs_DumpObject(yaffs_Object * obj);
++
++void yaffs_GutsTest(yaffs_Device * dev);
++
++/* A few useful functions */
++void yaffs_InitialiseTags(yaffs_ExtendedTags * tags);
++void yaffs_DeleteChunk(yaffs_Device * dev, int chunkId, int markNAND, int lyn);
++int yaffs_CheckFF(__u8 * buffer, int nBytes);
++void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi);
++
++#endif
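(Pulling the entry parameters above together: a minimal, hypothetical yaffs2 set-up that passes the geometry checks in yaffs_GutsInitialise() could look like the sketch below. A real driver must also install the NAND access callbacks checked by yaffs_CheckDevFunctions() before the call can succeed; the example_* wrapper and the numbers are illustrative only:)

	#include "yaffs_guts.h"

	static int example_bring_up(yaffs_Device *dev, void *mtd)
	{
		dev->genericDevice      = mtd;	/* e.g. the mtd_info pointer */
		dev->nDataBytesPerChunk = 2048;	/* yaffs2 requires >= 1024 */
		dev->nChunksPerBlock    = 64;
		dev->startBlock         = 0;	/* block 0 handled via the internal offset */
		dev->endBlock           = 511;
		dev->nReservedBlocks    = 5;
		dev->nShortOpCaches     = 10;	/* capped at YAFFS_MAX_SHORT_OP_CACHES */
		dev->isYaffs2           = 1;
		/* ... NAND access callbacks go here ... */

		if (yaffs_GutsInitialise(dev) != YAFFS_OK)
			return -1;

		/* use the device, then: */
		yaffs_Deinitialise(dev);
		return 0;
	}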
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_mtdif.c linux-2.6.21.1.new/fs/yaffs2/yaffs_mtdif.c
+--- linux-2.6.21.1/fs/yaffs2/yaffs_mtdif.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_mtdif.c	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,241 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++const char *yaffs_mtdif_c_version =
++    "$Id: yaffs_mtdif.c,v 1.19 2007-02-14 01:09:06 wookey Exp $";
++
++#include "yportenv.h"
++
++
++#include "yaffs_mtdif.h"
++
++#include "linux/mtd/mtd.h"
++#include "linux/types.h"
++#include "linux/time.h"
++#include "linux/mtd/nand.h"
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
++static struct nand_oobinfo yaffs_oobinfo = {
++	.useecc = 1,
++	.eccbytes = 6,
++	.eccpos = {8, 9, 10, 13, 14, 15}
++};
++
++static struct nand_oobinfo yaffs_noeccinfo = {
++	.useecc = 0,
++};
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++static inline void translate_spare2oob(const yaffs_Spare *spare, __u8 *oob)
++{
++	oob[0] = spare->tagByte0;
++	oob[1] = spare->tagByte1;
++	oob[2] = spare->tagByte2;
++	oob[3] = spare->tagByte3;
++	oob[4] = spare->tagByte4;
++	oob[5] = spare->tagByte5 & 0x3f;
++	oob[5] |= spare->blockStatus == 'Y' ? 0: 0x80;
++	oob[5] |= spare->pageStatus == 0 ? 0: 0x40;
++	oob[6] = spare->tagByte6;
++	oob[7] = spare->tagByte7;
++}
++
++static inline void translate_oob2spare(yaffs_Spare *spare, __u8 *oob)
++{
++	struct yaffs_NANDSpare *nspare = (struct yaffs_NANDSpare *)spare;
++	spare->tagByte0 = oob[0];
++	spare->tagByte1 = oob[1];
++	spare->tagByte2 = oob[2];
++	spare->tagByte3 = oob[3];
++	spare->tagByte4 = oob[4];
++	spare->tagByte5 = oob[5] == 0xff ? 0xff : oob[5] & 0x3f;
++	spare->blockStatus = oob[5] & 0x80 ? 0xff : 'Y';
++	spare->pageStatus = oob[5] & 0x40 ? 0xff : 0;
++	spare->ecc1[0] = spare->ecc1[1] = spare->ecc1[2] = 0xff;
++	spare->tagByte6 = oob[6];
++	spare->tagByte7 = oob[7];
++	spare->ecc2[0] = spare->ecc2[1] = spare->ecc2[2] = 0xff;
++
++	nspare->eccres1 = nspare->eccres2 = 0; /* FIXME */
++}
++#endif
++
++int nandmtd_WriteChunkToNAND(yaffs_Device * dev, int chunkInNAND,
++			     const __u8 * data, const yaffs_Spare * spare)
++{
++	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++	struct mtd_oob_ops ops;
++#endif
++	size_t dummy;
++	int retval = 0;
++
++	loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++	__u8 spareAsBytes[8]; /* OOB */
++
++	if (data && !spare)
++		retval = mtd->write(mtd, addr, dev->nDataBytesPerChunk,
++				&dummy, data);
++	else if (spare) {
++		if (dev->useNANDECC) {
++			translate_spare2oob(spare, spareAsBytes);
++			ops.mode = MTD_OOB_AUTO;
++			ops.ooblen = 8; /* temp hack */
++		} else {
++			ops.mode = MTD_OOB_RAW;
++			ops.ooblen = YAFFS_BYTES_PER_SPARE;
++		}
++		ops.len = data ? dev->nDataBytesPerChunk : ops.ooblen;
++		ops.datbuf = (u8 *)data;
++		ops.ooboffs = 0;
++		ops.oobbuf = spareAsBytes;
++		retval = mtd->write_oob(mtd, addr, &ops);
++	}
++#else
++	__u8 *spareAsBytes = (__u8 *) spare;
++
++	if (data && spare) {
++		if (dev->useNANDECC)
++			retval =
++			    mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
++					   &dummy, data, spareAsBytes,
++					   &yaffs_oobinfo);
++		else
++			retval =
++			    mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
++					   &dummy, data, spareAsBytes,
++					   &yaffs_noeccinfo);
++	} else {
++		if (data)
++			retval =
++			    mtd->write(mtd, addr, dev->nDataBytesPerChunk, &dummy,
++				       data);
++		if (spare)
++			retval =
++			    mtd->write_oob(mtd, addr, YAFFS_BYTES_PER_SPARE,
++					   &dummy, spareAsBytes);
++	}
++#endif
++
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++}
++
++int nandmtd_ReadChunkFromNAND(yaffs_Device * dev, int chunkInNAND, __u8 * data,
++			      yaffs_Spare * spare)
++{
++	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++	struct mtd_oob_ops ops;
++#endif
++	size_t dummy;
++	int retval = 0;
++
++	loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++	__u8 spareAsBytes[8]; /* OOB */
++
++	if (data && !spare)
++		retval = mtd->read(mtd, addr, dev->nDataBytesPerChunk,
++				&dummy, data);
++	else if (spare) {
++		if (dev->useNANDECC) {
++			ops.mode = MTD_OOB_AUTO;
++			ops.ooblen = 8; /* temp hack */
++		} else {
++			ops.mode = MTD_OOB_RAW;
++			ops.ooblen = YAFFS_BYTES_PER_SPARE;
++		}
++		ops.len = data ? dev->nDataBytesPerChunk : ops.ooblen;
++		ops.datbuf = data;
++		ops.ooboffs = 0;
++		ops.oobbuf = spareAsBytes;
++		retval = mtd->read_oob(mtd, addr, &ops);
++		if (dev->useNANDECC)
++			translate_oob2spare(spare, spareAsBytes);
++	}
++#else
++	__u8 *spareAsBytes = (__u8 *) spare;
++
++	if (data && spare) {
++		if (dev->useNANDECC) {	
++			/* Careful, this call adds 2 ints */
++			/* to the end of the spare data.  Calling function */
++			/* should allocate enough memory for spare, */
++			/* i.e. [YAFFS_BYTES_PER_SPARE+2*sizeof(int)]. */
++			retval =
++			    mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
++					  &dummy, data, spareAsBytes,
++					  &yaffs_oobinfo);
++		} else {
++			retval =
++			    mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
++					  &dummy, data, spareAsBytes,
++					  &yaffs_noeccinfo);
++		}
++	} else {
++		if (data)
++			retval =
++			    mtd->read(mtd, addr, dev->nDataBytesPerChunk, &dummy,
++				      data);
++		if (spare)
++			retval =
++			    mtd->read_oob(mtd, addr, YAFFS_BYTES_PER_SPARE,
++					  &dummy, spareAsBytes);
++	}
++#endif
++
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++}
++
++int nandmtd_EraseBlockInNAND(yaffs_Device * dev, int blockNumber)
++{
++	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++	__u32 addr =
++	    ((loff_t) blockNumber) * dev->nDataBytesPerChunk
++		* dev->nChunksPerBlock;
++	struct erase_info ei;
++	int retval = 0;
++
++	ei.mtd = mtd;
++	ei.addr = addr;
++	ei.len = dev->nDataBytesPerChunk * dev->nChunksPerBlock;
++	ei.time = 1000;
++	ei.retries = 2;
++	ei.callback = NULL;
++	ei.priv = (u_long) dev;
++
++	/* Todo finish off the ei if required */
++
++	sema_init(&dev->sem, 0);
++
++	retval = mtd->erase(mtd, &ei);
++
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++}
++
++int nandmtd_InitialiseNAND(yaffs_Device * dev)
++{
++	return YAFFS_OK;
++}
++
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_mtdif.h linux-2.6.21.1.new/fs/yaffs2/yaffs_mtdif.h
+--- linux-2.6.21.1/fs/yaffs2/yaffs_mtdif.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_mtdif.h	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,27 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system. 
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_MTDIF_H__
++#define __YAFFS_MTDIF_H__
++
++#include "yaffs_guts.h"
++
++int nandmtd_WriteChunkToNAND(yaffs_Device * dev, int chunkInNAND,
++			     const __u8 * data, const yaffs_Spare * spare);
++int nandmtd_ReadChunkFromNAND(yaffs_Device * dev, int chunkInNAND, __u8 * data,
++			      yaffs_Spare * spare);
++int nandmtd_EraseBlockInNAND(yaffs_Device * dev, int blockNumber);
++int nandmtd_InitialiseNAND(yaffs_Device * dev);
++#endif
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_mtdif1-compat.c linux-2.6.21.1.new/fs/yaffs2/yaffs_mtdif1-compat.c
+--- linux-2.6.21.1/fs/yaffs2/yaffs_mtdif1-compat.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_mtdif1-compat.c	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,434 @@
++From ian@brightstareng.com Fri May 18 15:06:49 2007
++From ian@brightstareng.com Fri May 18 15:08:21 2007
++Received: from 206.173.66.57.ptr.us.xo.net ([206.173.66.57] helo=zebra.brightstareng.com)
++	by apollo.linkchoose.co.uk with esmtp (Exim 4.60)
++	(envelope-from <ian@brightstareng.com>)
++	id 1Hp380-00011e-T6
++	for david.goodenough@linkchoose.co.uk; Fri, 18 May 2007 15:08:21 +0100
++Received: from localhost (localhost.localdomain [127.0.0.1])
++	by zebra.brightstareng.com (Postfix) with ESMTP
++	id 4819F28C004; Fri, 18 May 2007 10:07:49 -0400 (EDT)
++Received: from zebra.brightstareng.com ([127.0.0.1])
++ by localhost (zebra [127.0.0.1]) (amavisd-new, port 10024) with ESMTP
++ id 05328-06; Fri, 18 May 2007 10:07:16 -0400 (EDT)
++Received: from pippin (unknown [192.168.1.25])
++	by zebra.brightstareng.com (Postfix) with ESMTP
++	id 8BEF528C1BC; Fri, 18 May 2007 10:06:53 -0400 (EDT)
++From: Ian McDonnell <ian@brightstareng.com>
++To: David Goodenough <david.goodenough@linkchoose.co.uk>
++Subject: Re: something tested this time -- yaffs_mtdif1-compat.c
++Date: Fri, 18 May 2007 10:06:49 -0400
++User-Agent: KMail/1.9.1
++References: <200705142207.06909.ian@brightstareng.com> <200705171131.53536.ian@brightstareng.com> <200705181334.32166.david.goodenough@linkchoose.co.uk>
++In-Reply-To: <200705181334.32166.david.goodenough@linkchoose.co.uk>
++Cc: Andrea Conti <alyf@alyf.net>,
++ Charles Manning <manningc2@actrix.gen.nz>
++MIME-Version: 1.0
++Content-Type: Multipart/Mixed;
++  boundary="Boundary-00=_5LbTGmt62YoutxM"
++Message-Id: <200705181006.49860.ian@brightstareng.com>
++X-Virus-Scanned: by amavisd-new at brightstareng.com
++Status: R
++X-Status: NT
++X-KMail-EncryptionState:  
++X-KMail-SignatureState:  
++X-KMail-MDN-Sent:  
++
++--Boundary-00=_5LbTGmt62YoutxM
++Content-Type: text/plain;
++  charset="iso-8859-15"
++Content-Transfer-Encoding: 7bit
++Content-Disposition: inline
++
++David, Andrea,
++
++On Friday 18 May 2007 08:34, you wrote:
++> Yea team.  With this fix in place (I put it in the wrong place
++> at first) I can now mount and ls the Yaffs partition without
++> an error messages!
++
++Good news!
++
++Attached is a newer yaffs_mtdif1.c with a bandaid to help the 
++2.6.18 and 2.6.19 versions of MTD not trip on the oob read.
++See the LINUX_VERSION_CODE conditional in 
++nandmtd1_ReadChunkWithTagsFromNAND.
++
++-imcd
++
++--Boundary-00=_5LbTGmt62YoutxM
++Content-Type: text/x-csrc;
++  charset="iso-8859-15";
++  name="yaffs_mtdif1.c"
++Content-Transfer-Encoding: 7bit
++Content-Disposition: attachment;
++	filename="yaffs_mtdif1.c"
++
++/*
++ * YAFFS: Yet another FFS. A NAND-flash specific file system.
++ * yaffs_mtdif1.c  NAND mtd interface functions for small-page NAND.
++ *
++ * Copyright (C) 2002 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * This module provides the interface between yaffs_nand.c and the
++ * MTD API.  This version is used when the MTD interface supports the
++ * 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17,
++ * and we have small-page NAND device.
++ *
++ * These functions are invoked via function pointers in yaffs_nand.c.
++ * This replaces functionality provided by functions in yaffs_mtdif.c
++ * and the yaffs_TagsCompatability functions in yaffs_tagscompat.c that are
++ * called in yaffs_mtdif.c when the function pointers are NULL.
++ * We assume the MTD layer is performing ECC (useNANDECC is true).
++ */
++
++#include "yportenv.h"
++#include "yaffs_guts.h"
++#include "yaffs_packedtags1.h"
++#include "yaffs_tagscompat.h"	// for yaffs_CalcTagsECC
++
++#include "linux/kernel.h"
++#include "linux/version.h"
++#include "linux/types.h"
++#include "linux/mtd/mtd.h"
++
++/* Don't compile this module if we don't have MTD's mtd_oob_ops interface */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++
++const char *yaffs_mtdif1_c_version = "$Id: yaffs_mtdif1.c,v 1.3 2007/05/15 20:16:11 ian Exp $";
++
++#ifndef CONFIG_YAFFS_9BYTE_TAGS
++# define YTAG1_SIZE 8
++#else
++# define YTAG1_SIZE 9
++#endif
++
++#if 0
++/* Use the following nand_ecclayout with MTD when using
++ * CONFIG_YAFFS_9BYTE_TAGS and the older on-NAND tags layout.
++ * If you have existing Yaffs images and the byte order differs from this,
++ * adjust 'oobfree' to match your existing Yaffs data.
++ *
++ * This nand_ecclayout scatters/gathers to/from the old-yaffs layout with the
++ * pageStatus byte (at NAND spare offset 4) scattered/gathered from/to
++ * the 9th byte.
++ *
++ * Old-style on-NAND format: T0,T1,T2,T3,P,B,T4,T5,E0,E1,E2,T6,T7,E3,E4,E5
++ * We have/need PackedTags1 plus pageStatus: T0,T1,T2,T3,T4,T5,T6,T7,P
++ * where Tn are the tag bytes, En are MTD's ECC bytes, P is the pageStatus
++ * byte and B is the small-page bad-block indicator byte.
++ */
++static struct nand_ecclayout nand_oob_16 = {
++	.eccbytes = 6,
++	.eccpos = { 8, 9, 10, 13, 14, 15 },
++	.oobavail = 9,
++	.oobfree = { { 0, 4 }, { 6, 2 }, { 11, 2 }, { 4, 1 } }
++};
++#endif
++
++/* Write a chunk (page) of data to NAND.
++ *
++ * Caller always provides ExtendedTags data which are converted to a more
++ * compact (packed) form for storage in NAND.  A mini-ECC runs over the
++ * contents of the tags meta-data and is used to validate the tags when read.
++ *
++ *  - Pack ExtendedTags to PackedTags1 form
++ *  - Compute mini-ECC for PackedTags1
++ *  - Write data and packed tags to NAND.
++ *
++ * Note: Due to the use of the PackedTags1 meta-data which does not include
++ * a full sequence number (as found in the larger PackedTags2 form) it is
++ * necessary for Yaffs to re-write a chunk/page (just once) to mark it as
++ * discarded and dirty.  This is not ideal: newer NAND parts are supposed
++ * to be written just once.  When Yaffs performs this operation, this
++ * function is called with a NULL data pointer -- calling MTD write_oob
++ * without data is valid usage (2.6.17).
++ *
++ * Any underlying MTD error results in YAFFS_FAIL.
++ * Returns YAFFS_OK or YAFFS_FAIL.
++ */
++int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev,
++	int chunkInNAND, const __u8 * data, const yaffs_ExtendedTags * etags)
++{
++	struct mtd_info * mtd = dev->genericDevice;
++	int chunkBytes = dev->nDataBytesPerChunk;
++	loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
++	struct mtd_oob_ops ops;
++	yaffs_PackedTags1 pt1;
++	int retval;
++
++	/* we assume that PackedTags1 and yaffs_Tags are compatible */
++	compile_time_assertion(sizeof(yaffs_PackedTags1) == 12);
++	compile_time_assertion(sizeof(yaffs_Tags) == 8);
++
++	yaffs_PackTags1(&pt1, etags);
++	yaffs_CalcTagsECC((yaffs_Tags *)&pt1);
++
++	/* When deleting a chunk, the upper layer provides only skeletal
++	 * etags, one with chunkDeleted set.  However, we need to update the
++	 * tags, not erase them completely.  So we use the NAND write property
++	 * that only zeroed-bits stick and set tag bytes to all-ones and
++	 * zero just the (not) deleted bit.
++	 */
++#ifndef CONFIG_YAFFS_9BYTE_TAGS
++	if (etags->chunkDeleted) {
++		memset(&pt1, 0xff, 8);
++		/* clear delete status bit to indicate deleted */
++		pt1.deleted = 0;
++	}
++#else
++	((__u8 *)&pt1)[8] = 0xff;
++	if (etags->chunkDeleted) {
++		memset(&pt1, 0xff, 8);
++		/* zero pageStatus byte to indicate deleted */
++		((__u8 *)&pt1)[8] = 0;
++	}
++#endif
++
++	memset(&ops, 0, sizeof(ops));
++	ops.mode = MTD_OOB_AUTO;
++	ops.len = (data) ? chunkBytes : 0;
++	ops.ooblen = YTAG1_SIZE;
++	ops.datbuf = (__u8 *)data;
++	ops.oobbuf = (__u8 *)&pt1;
++
++	retval = mtd->write_oob(mtd, addr, &ops);
++	if (retval) {
++		yaffs_trace(YAFFS_TRACE_MTD,
++			"write_oob failed, chunk %d, mtd error %d\n",
++			chunkInNAND, retval);
++	}
++	return retval ? YAFFS_FAIL : YAFFS_OK;
++}
++
++/* Return with empty ExtendedTags but add eccResult.
++ */
++static int rettags(yaffs_ExtendedTags * etags, int eccResult, int retval)
++{
++	if (etags) {
++		memset(etags, 0, sizeof(*etags));
++		etags->eccResult = eccResult;
++	}
++	return retval;
++}
++
++/* Read a chunk (page) from NAND.
++ *
++ * Caller expects ExtendedTags data to be usable even on error; that is,
++ * all members except eccResult and blockBad are zeroed.
++ *
++ *  - Check ECC results for data (if applicable)
++ *  - Check for blank/erased block (return empty ExtendedTags if blank)
++ *  - Check the PackedTags1 mini-ECC (correct if necessary/possible)
++ *  - Convert PackedTags1 to ExtendedTags
++ *  - Update eccResult and blockBad members to reflect state.
++ *
++ * Returns YAFFS_OK or YAFFS_FAIL.
++ */
++int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev,
++	int chunkInNAND, __u8 * data, yaffs_ExtendedTags * etags)
++{
++	struct mtd_info * mtd = dev->genericDevice;
++	int chunkBytes = dev->nDataBytesPerChunk;
++	loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
++	int eccres = YAFFS_ECC_RESULT_NO_ERROR;
++	struct mtd_oob_ops ops;
++	yaffs_PackedTags1 pt1;
++	int retval;
++	int deleted;
++
++	memset(&ops, 0, sizeof(ops));
++	ops.mode = MTD_OOB_AUTO;
++	ops.len = (data) ? chunkBytes : 0;
++	ops.ooblen = YTAG1_SIZE;
++	ops.datbuf = data;
++	ops.oobbuf = (__u8 *)&pt1;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++	/* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
++	 * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
++	 */
++	ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
++#endif
++	/* Read page and oob using MTD.
++	 * Check status and determine ECC result.
++	 */
++	retval = mtd->read_oob(mtd, addr, &ops);
++	if (retval) {
++		yaffs_trace(YAFFS_TRACE_MTD,
++			"read_oob failed, chunk %d, mtd error %d\n",
++			chunkInNAND, retval);
++	}
++
++	switch (retval) {
++	case 0:
++		/* no error */
++		break;
++
++	case -EUCLEAN:
++		/* MTD's ECC fixed the data */
++		eccres = YAFFS_ECC_RESULT_FIXED;
++		dev->eccFixed++;
++		break;
++
++	case -EBADMSG:
++		/* MTD's ECC could not fix the data */
++		dev->eccUnfixed++;
++		/* fall into... */
++	default:
++		rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
++		etags->blockBad = (mtd->block_isbad)(mtd, addr);
++		return YAFFS_FAIL;
++	}
++
++	/* Check for a blank/erased chunk.
++	 */
++	if (yaffs_CheckFF((__u8 *)&pt1, 8)) {
++		/* when blank, upper layers want eccResult to be <= NO_ERROR */
++		return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
++	}
++
++#ifndef CONFIG_YAFFS_9BYTE_TAGS
++	/* Read deleted status (bit) then return it to its non-deleted
++	 * state before performing tags mini-ECC check. pt1.deleted is
++	 * inverted.
++	 */
++	deleted = !pt1.deleted;
++	pt1.deleted = 1;
++#else
++	(void) deleted; /* not used */
++#endif
++
++	/* Check the packed tags mini-ECC and correct if necessary/possible.
++	 */
++	retval = yaffs_CheckECCOnTags((yaffs_Tags *)&pt1);
++	switch (retval) {
++	case 0:
++		/* no tags error, use MTD result */
++		break;
++	case 1:
++		/* recovered tags-ECC error */
++		dev->tagsEccFixed++;
++		eccres = YAFFS_ECC_RESULT_FIXED;
++		break;
++	default:
++		/* unrecovered tags-ECC error */
++		dev->tagsEccUnfixed++;
++		return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
++	}
++
++	/* Unpack the tags to extended form and set ECC result.
++	 * [set shouldBeFF just to keep yaffs_UnpackTags1 happy]
++	 */
++	pt1.shouldBeFF = 0xFFFFFFFF;
++	yaffs_UnpackTags1(etags, &pt1);
++	etags->eccResult = eccres;
++
++	/* Set deleted state.
++	 */
++#ifndef CONFIG_YAFFS_9BYTE_TAGS
++	etags->chunkDeleted = deleted;
++#else
++	etags->chunkDeleted = (yaffs_CountBits(((__u8 *)&pt1)[8]) < 7);
++#endif
++	return YAFFS_OK;
++}
++
++/* Mark a block bad.
++ *
++ * This is a persistent state.
++ * Use of this function should be rare.
++ *
++ * Returns YAFFS_OK or YAFFS_FAIL.
++ */
++int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
++{
++	struct mtd_info * mtd = dev->genericDevice;
++	int blocksize = dev->nChunksPerBlock * dev->nDataBytesPerChunk;
++	int retval;
++
++	yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", blockNo);
++
++	retval = mtd->block_markbad(mtd, (loff_t)blocksize * blockNo);
++	return (retval) ? YAFFS_FAIL : YAFFS_OK;
++}
++
++/* Check any MTD prerequisites.
++ * 
++ * Returns YAFFS_OK or YAFFS_FAIL.
++ */
++static int nandmtd1_TestPrerequists(struct mtd_info * mtd)
++{
++	/* 2.6.18 has mtd->ecclayout->oobavail */
++	/* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */
++	int oobavail = mtd->ecclayout->oobavail;
++
++	if (oobavail < YTAG1_SIZE) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"mtd device has only %d bytes for tags, need %d",
++			oobavail, YTAG1_SIZE);
++		return YAFFS_FAIL;
++	}
++	return YAFFS_OK;
++}
++
++/* Query for the current state of a specific block.
++ *
++ * Examine the tags of the first chunk of the block and return the state:
++ *  - YAFFS_BLOCK_STATE_DEAD, the block is marked bad
++ *  - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use
++ *  - YAFFS_BLOCK_STATE_EMPTY, the block is clean
++ *
++ * Always returns YAFFS_OK.
++ */
++int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
++	yaffs_BlockState * pState, int *pSequenceNumber)
++{
++	struct mtd_info * mtd = dev->genericDevice;
++	int chunkNo = blockNo * dev->nChunksPerBlock;
++	yaffs_ExtendedTags etags;
++	int state = YAFFS_BLOCK_STATE_DEAD;
++	int seqnum = 0;
++	int retval;
++
++	/* We don't yet have a good place to test for MTD config prerequisites.
++	 * Do it here as we are called during the initial scan.
++	 */
++	if (nandmtd1_TestPrerequists(mtd) != YAFFS_OK) {
++		return YAFFS_FAIL;
++	}
++
++	retval = nandmtd1_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &etags);
++	if (etags.blockBad) {
++		yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
++			"block %d is marked bad", blockNo);
++		state = YAFFS_BLOCK_STATE_DEAD;
++	}
++	else if (etags.chunkUsed) {
++		state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
++		seqnum = etags.sequenceNumber;
++	}
++	else {
++		state = YAFFS_BLOCK_STATE_EMPTY;
++	}
++
++	*pState = state;
++	*pSequenceNumber = seqnum;
++
++	/* query always succeeds */
++	return YAFFS_OK;
++}
++
++#endif /*KERNEL_VERSION*/
++
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_mtdif1.c linux-2.6.21.1.new/fs/yaffs2/yaffs_mtdif1.c
+--- linux-2.6.21.1/fs/yaffs2/yaffs_mtdif1.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_mtdif1.c	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,363 @@
++/*
++ * YAFFS: Yet another FFS. A NAND-flash specific file system.
++ * yaffs_mtdif1.c  NAND mtd interface functions for small-page NAND.
++ *
++ * Copyright (C) 2002 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * This module provides the interface between yaffs_nand.c and the
++ * MTD API.  This version is used when the MTD interface supports the
++ * 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17,
++ * and we have a small-page NAND device.
++ *
++ * These functions are invoked via function pointers in yaffs_nand.c.
++ * This replaces functionality provided by functions in yaffs_mtdif.c
++ * and the yaffs_TagsCompatability functions in yaffs_tagscompat.c that are
++ * called in yaffs_nand.c when the function pointers are NULL.
++ * We assume the MTD layer is performing ECC (useNANDECC is true).
++ */
++
++#include "yportenv.h"
++#include "yaffs_guts.h"
++#include "yaffs_packedtags1.h"
++#include "yaffs_tagscompat.h"	// for yaffs_CalcTagsECC
++
++#include "linux/kernel.h"
++#include "linux/version.h"
++#include "linux/types.h"
++#include "linux/mtd/mtd.h"
++
++/* Don't compile this module if we don't have MTD's mtd_oob_ops interface */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++
++const char *yaffs_mtdif1_c_version = "$Id: yaffs_mtdif1.c,v 1.3 2007/05/15 20:16:11 ian Exp $";
++
++#ifndef CONFIG_YAFFS_9BYTE_TAGS
++# define YTAG1_SIZE 8
++#else
++# define YTAG1_SIZE 9
++#endif
++
++#if 0
++/* Use the following nand_ecclayout with MTD when using
++ * CONFIG_YAFFS_9BYTE_TAGS and the older on-NAND tags layout.
++ * If you have existing Yaffs images and the byte order differs from this,
++ * adjust 'oobfree' to match your existing Yaffs data.
++ *
++ * This nand_ecclayout scatters/gathers to/from the old-yaffs layout with the
++ * pageStatus byte (at NAND spare offset 4) scattered/gathered from/to
++ * the 9th byte.
++ *
++ * Old-style on-NAND format: T0,T1,T2,T3,P,B,T4,T5,E0,E1,E2,T6,T7,E3,E4,E5
++ * We have/need PackedTags1 plus pageStatus: T0,T1,T2,T3,T4,T5,T6,T7,P
++ * where Tn are the tag bytes, En are MTD's ECC bytes, P is the pageStatus
++ * byte and B is the small-page bad-block indicator byte.
++ */
++static struct nand_ecclayout nand_oob_16 = {
++	.eccbytes = 6,
++	.eccpos = { 8, 9, 10, 13, 14, 15 },
++	.oobavail = 9,
++	.oobfree = { { 0, 4 }, { 6, 2 }, { 11, 2 }, { 4, 1 } }
++};
++#endif
++
++/* Write a chunk (page) of data to NAND.
++ *
++ * Caller always provides ExtendedTags data which are converted to a more
++ * compact (packed) form for storage in NAND.  A mini-ECC runs over the
++ * contents of the tags meta-data; it is used to validate the tags when read.
++ *
++ *  - Pack ExtendedTags to PackedTags1 form
++ *  - Compute mini-ECC for PackedTags1
++ *  - Write data and packed tags to NAND.
++ *
++ * Note: Due to the use of the PackedTags1 meta-data which does not include
++ * a full sequence number (as found in the larger PackedTags2 form) it is
++ * necessary for Yaffs to re-write a chunk/page (just once) to mark it as
++ * discarded and dirty.  This is not ideal: newer NAND parts are supposed
++ * to be written just once.  When Yaffs performs this operation, this
++ * function is called with a NULL data pointer -- calling MTD write_oob
++ * without data is valid usage (2.6.17).
++ *
++ * Any underlying MTD error results in YAFFS_FAIL.
++ * Returns YAFFS_OK or YAFFS_FAIL.
++ */
++int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device *dev,
++	int chunkInNAND, const __u8 * data, const yaffs_ExtendedTags * etags)
++{
++	struct mtd_info * mtd = dev->genericDevice;
++	int chunkBytes = dev->nDataBytesPerChunk;
++	loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
++	struct mtd_oob_ops ops;
++	yaffs_PackedTags1 pt1;
++	int retval;
++
++	/* we assume that PackedTags1 and yaffs_Tags are compatible */
++	compile_time_assertion(sizeof(yaffs_PackedTags1) == 12);
++	compile_time_assertion(sizeof(yaffs_Tags) == 8);
++
++	dev->nPageWrites++;
++
++	yaffs_PackTags1(&pt1, etags);
++	yaffs_CalcTagsECC((yaffs_Tags *)&pt1);
++
++	/* When deleting a chunk, the upper layer provides only skeletal
++	 * etags, one with chunkDeleted set.  However, we need to update the
++	 * tags, not erase them completely.  So we use the NAND write property
++	 * that only zeroed-bits stick and set tag bytes to all-ones and
++	 * zero just the (not) deleted bit.
++	 */
++#ifndef CONFIG_YAFFS_9BYTE_TAGS
++	if (etags->chunkDeleted) {
++		memset(&pt1, 0xff, 8);
++		/* clear delete status bit to indicate deleted */
++		pt1.deleted = 0;
++	}
++#else
++	((__u8 *)&pt1)[8] = 0xff;
++	if (etags->chunkDeleted) {
++		memset(&pt1, 0xff, 8);
++		/* zero pageStatus byte to indicate deleted */
++		((__u8 *)&pt1)[8] = 0;
++	}
++#endif
++
++	memset(&ops, 0, sizeof(ops));
++	ops.mode = MTD_OOB_AUTO;
++	ops.len = (data) ? chunkBytes : 0;
++	ops.ooblen = YTAG1_SIZE;
++	ops.datbuf = (__u8 *)data;
++	ops.oobbuf = (__u8 *)&pt1;
++
++	retval = mtd->write_oob(mtd, addr, &ops);
++	if (retval) {
++		yaffs_trace(YAFFS_TRACE_MTD,
++			"write_oob failed, chunk %d, mtd error %d\n",
++			chunkInNAND, retval);
++	}
++	return retval ? YAFFS_FAIL : YAFFS_OK;
++}
++
++/* Return with empty ExtendedTags but add eccResult.
++ */
++static int rettags(yaffs_ExtendedTags * etags, int eccResult, int retval)
++{
++	if (etags) {
++		memset(etags, 0, sizeof(*etags));
++		etags->eccResult = eccResult;
++	}
++	return retval;
++}
++
++/* Read a chunk (page) from NAND.
++ *
++ * Caller expects ExtendedTags data to be usable even on error; that is,
++ * all members except eccResult and blockBad are zeroed.
++ *
++ *  - Check ECC results for data (if applicable)
++ *  - Check for blank/erased block (return empty ExtendedTags if blank)
++ *  - Check the PackedTags1 mini-ECC (correct if necessary/possible)
++ *  - Convert PackedTags1 to ExtendedTags
++ *  - Update eccResult and blockBad members to reflect state.
++ *
++ * Returns YAFFS_OK or YAFFS_FAIL.
++ */
++int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device *dev,
++	int chunkInNAND, __u8 * data, yaffs_ExtendedTags * etags)
++{
++	struct mtd_info * mtd = dev->genericDevice;
++	int chunkBytes = dev->nDataBytesPerChunk;
++	loff_t addr = ((loff_t)chunkInNAND) * chunkBytes;
++	int eccres = YAFFS_ECC_RESULT_NO_ERROR;
++	struct mtd_oob_ops ops;
++	yaffs_PackedTags1 pt1;
++	int retval;
++	int deleted;
++
++	dev->nPageReads++;
++
++	memset(&ops, 0, sizeof(ops));
++	ops.mode = MTD_OOB_AUTO;
++	ops.len = (data) ? chunkBytes : 0;
++	ops.ooblen = YTAG1_SIZE;
++	ops.datbuf = data;
++	ops.oobbuf = (__u8 *)&pt1;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++	/* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
++	 * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
++	 */
++	ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
++#endif
++	/* Read page and oob using MTD.
++	 * Check status and determine ECC result.
++	 */
++	retval = mtd->read_oob(mtd, addr, &ops);
++	if (retval) {
++		yaffs_trace(YAFFS_TRACE_MTD,
++			"read_oob failed, chunk %d, mtd error %d\n",
++			chunkInNAND, retval);
++	}
++
++	switch (retval) {
++	case 0:
++		/* no error */
++		break;
++
++	case -EUCLEAN:
++		/* MTD's ECC fixed the data */
++		eccres = YAFFS_ECC_RESULT_FIXED;
++		dev->eccFixed++;
++		break;
++
++	case -EBADMSG:
++		/* MTD's ECC could not fix the data */
++		dev->eccUnfixed++;
++		/* fall into... */
++	default:
++		rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
++		etags->blockBad = (mtd->block_isbad)(mtd, addr);
++		return YAFFS_FAIL;
++	}
++
++	/* Check for a blank/erased chunk.
++	 */
++	if (yaffs_CheckFF((__u8 *)&pt1, 8)) {
++		/* when blank, upper layers want eccResult to be <= NO_ERROR */
++		return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
++	}
++
++#ifndef CONFIG_YAFFS_9BYTE_TAGS
++	/* Read deleted status (bit) then return it to its non-deleted
++	 * state before performing tags mini-ECC check. pt1.deleted is
++	 * inverted.
++	 */
++	deleted = !pt1.deleted;
++	pt1.deleted = 1;
++#else
++	deleted = (yaffs_CountBits(((__u8 *)&pt1)[8]) < 7);
++#endif
++
++	/* Check the packed tags mini-ECC and correct if necessary/possible.
++	 */
++	retval = yaffs_CheckECCOnTags((yaffs_Tags *)&pt1);
++	switch (retval) {
++	case 0:
++		/* no tags error, use MTD result */
++		break;
++	case 1:
++		/* recovered tags-ECC error */
++		dev->tagsEccFixed++;
++		if (eccres == YAFFS_ECC_RESULT_NO_ERROR)
++			eccres = YAFFS_ECC_RESULT_FIXED;
++		break;
++	default:
++		/* unrecovered tags-ECC error */
++		dev->tagsEccUnfixed++;
++		return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
++	}
++
++	/* Unpack the tags to extended form and set ECC result.
++	 * [set shouldBeFF just to keep yaffs_UnpackTags1 happy]
++	 */
++	pt1.shouldBeFF = 0xFFFFFFFF;
++	yaffs_UnpackTags1(etags, &pt1);
++	etags->eccResult = eccres;
++
++	/* Set deleted state */
++	etags->chunkDeleted = deleted;
++	return YAFFS_OK;
++}
++
++/* Mark a block bad.
++ *
++ * This is a persistent state.
++ * Use of this function should be rare.
++ *
++ * Returns YAFFS_OK or YAFFS_FAIL.
++ */
++int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
++{
++	struct mtd_info * mtd = dev->genericDevice;
++	int blocksize = dev->nChunksPerBlock * dev->nDataBytesPerChunk;
++	int retval;
++
++	yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", blockNo);
++
++	retval = mtd->block_markbad(mtd, (loff_t)blocksize * blockNo);
++	return (retval) ? YAFFS_FAIL : YAFFS_OK;
++}
++
++/* Check any MTD prerequisites.
++ * 
++ * Returns YAFFS_OK or YAFFS_FAIL.
++ */
++static int nandmtd1_TestPrerequists(struct mtd_info * mtd)
++{
++	/* 2.6.18 has mtd->ecclayout->oobavail */
++	/* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */
++	int oobavail = mtd->ecclayout->oobavail;
++
++	if (oobavail < YTAG1_SIZE) {
++		yaffs_trace(YAFFS_TRACE_ERROR,
++			"mtd device has only %d bytes for tags, need %d\n",
++			oobavail, YTAG1_SIZE);
++		return YAFFS_FAIL;
++	}
++	return YAFFS_OK;
++}
++
++/* Query for the current state of a specific block.
++ *
++ * Examine the tags of the first chunk of the block and return the state:
++ *  - YAFFS_BLOCK_STATE_DEAD, the block is marked bad
++ *  - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use
++ *  - YAFFS_BLOCK_STATE_EMPTY, the block is clean
++ *
++ * Always returns YAFFS_OK.
++ */
++int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
++	yaffs_BlockState * pState, int *pSequenceNumber)
++{
++	struct mtd_info * mtd = dev->genericDevice;
++	int chunkNo = blockNo * dev->nChunksPerBlock;
++	yaffs_ExtendedTags etags;
++	int state = YAFFS_BLOCK_STATE_DEAD;
++	int seqnum = 0;
++	int retval;
++
++	/* We don't yet have a good place to test for MTD config prerequisites.
++	 * Do it here as we are called during the initial scan.
++	 */
++	if (nandmtd1_TestPrerequists(mtd) != YAFFS_OK) {
++		return YAFFS_FAIL;
++	}
++
++	retval = nandmtd1_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &etags);
++	if (etags.blockBad) {
++		yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
++			"block %d is marked bad", blockNo);
++		state = YAFFS_BLOCK_STATE_DEAD;
++	}
++	else if (etags.chunkUsed) {
++		state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
++		seqnum = etags.sequenceNumber;
++	}
++	else {
++		state = YAFFS_BLOCK_STATE_EMPTY;
++	}
++
++	*pState = state;
++	*pSequenceNumber = seqnum;
++
++	/* query always succeeds */
++	return YAFFS_OK;
++}
++
++#endif /*KERNEL_VERSION*/
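The module comment above notes that these handlers are reached through function pointers in yaffs_nand.c. As a point of reference, here is a minimal sketch of wiring them into a yaffs_Device; the pointer field names match those used by yaffs_nand.c further down in this patch, while the surrounding initialisation and the exact field types in yaffs_guts.h are assumed rather than shown here.

#include "yaffs_guts.h"
#include "yaffs_mtdif1.h"
#include "linux/mtd/mtd.h"

/* Hook the small-page MTD handlers into a device.  This assumes the
 * function-pointer fields declared in yaffs_guts.h have signatures
 * matching the prototypes in yaffs_mtdif1.h. */
static void example_use_mtdif1(yaffs_Device *dev, struct mtd_info *mtd)
{
	dev->genericDevice = mtd;	/* every nandmtd1_* helper reads the MTD from here */
	dev->writeChunkWithTagsToNAND = nandmtd1_WriteChunkWithTagsToNAND;
	dev->readChunkWithTagsFromNAND = nandmtd1_ReadChunkWithTagsFromNAND;
	dev->markNANDBlockBad = nandmtd1_MarkNANDBlockBad;
	dev->queryNANDBlock = nandmtd1_QueryNANDBlock;
}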
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_mtdif1.h linux-2.6.21.1.new/fs/yaffs2/yaffs_mtdif1.h
+--- linux-2.6.21.1/fs/yaffs2/yaffs_mtdif1.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_mtdif1.h	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,28 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_MTDIF1_H__
++#define __YAFFS_MTDIF1_H__
++
++int nandmtd1_WriteChunkWithTagsToNAND(yaffs_Device * dev, int chunkInNAND,
++	const __u8 * data, const yaffs_ExtendedTags * tags);
++
++int nandmtd1_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
++	__u8 * data, yaffs_ExtendedTags * tags);
++
++int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
++
++int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
++	yaffs_BlockState * state, int *sequenceNumber);
++
++#endif
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_mtdif2.c linux-2.6.21.1.new/fs/yaffs2/yaffs_mtdif2.c
+--- linux-2.6.21.1/fs/yaffs2/yaffs_mtdif2.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_mtdif2.c	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,232 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* mtd interface for YAFFS2 */
++
++const char *yaffs_mtdif2_c_version =
++    "$Id: yaffs_mtdif2.c,v 1.17 2007-02-14 01:09:06 wookey Exp $";
++
++#include "yportenv.h"
++
++
++#include "yaffs_mtdif2.h"
++
++#include "linux/mtd/mtd.h"
++#include "linux/types.h"
++#include "linux/time.h"
++
++#include "yaffs_packedtags2.h"
++
++int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device * dev, int chunkInNAND,
++				      const __u8 * data,
++				      const yaffs_ExtendedTags * tags)
++{
++	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++	struct mtd_oob_ops ops;
++#else
++	size_t dummy;
++#endif
++	int retval = 0;
++
++	loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
++
++	yaffs_PackedTags2 pt;
++
++	T(YAFFS_TRACE_MTD,
++	  (TSTR
++	   ("nandmtd2_WriteChunkWithTagsToNAND chunk %d data %p tags %p"
++	    TENDSTR), chunkInNAND, data, tags));
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++	if (tags)
++		yaffs_PackTags2(&pt, tags);
++	else
++		BUG(); /* both tags and data should always be present */
++
++	if (data) {
++		ops.mode = MTD_OOB_AUTO;
++		ops.ooblen = sizeof(pt);
++		ops.len = dev->nDataBytesPerChunk;
++		ops.ooboffs = 0;
++		ops.datbuf = (__u8 *)data;
++		ops.oobbuf = (void *)&pt;
++		retval = mtd->write_oob(mtd, addr, &ops);
++	} else
++		BUG(); /* both tags and data should always be present */
++#else
++	if (tags) {
++		yaffs_PackTags2(&pt, tags);
++	}
++
++	if (data && tags) {
++		if (dev->useNANDECC)
++			retval =
++			    mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
++					   &dummy, data, (__u8 *) & pt, NULL);
++		else
++			retval =
++			    mtd->write_ecc(mtd, addr, dev->nDataBytesPerChunk,
++					   &dummy, data, (__u8 *) & pt, NULL);
++	} else {
++		if (data)
++			retval =
++			    mtd->write(mtd, addr, dev->nDataBytesPerChunk, &dummy,
++				       data);
++		if (tags)
++			retval =
++			    mtd->write_oob(mtd, addr, mtd->oobsize, &dummy,
++					   (__u8 *) & pt);
++
++	}
++#endif
++
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++}
++
++int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
++				       __u8 * data, yaffs_ExtendedTags * tags)
++{
++	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++	struct mtd_oob_ops ops;
++#endif
++	size_t dummy;
++	int retval = 0;
++
++	loff_t addr = ((loff_t) chunkInNAND) * dev->nDataBytesPerChunk;
++
++	yaffs_PackedTags2 pt;
++
++	T(YAFFS_TRACE_MTD,
++	  (TSTR
++	   ("nandmtd2_ReadChunkWithTagsFromNAND chunk %d data %p tags %p"
++	    TENDSTR), chunkInNAND, data, tags));
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17))
++	if (data && !tags)
++		retval = mtd->read(mtd, addr, dev->nDataBytesPerChunk,
++				&dummy, data);
++	else if (tags) {
++		ops.mode = MTD_OOB_AUTO;
++		ops.ooblen = sizeof(pt);
++		ops.len = data ? dev->nDataBytesPerChunk : sizeof(pt);
++		ops.ooboffs = 0;
++		ops.datbuf = data;
++		ops.oobbuf = dev->spareBuffer;
++		retval = mtd->read_oob(mtd, addr, &ops);
++	}
++#else
++	if (data && tags) {
++		if (dev->useNANDECC) {
++			retval =
++			    mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
++					  &dummy, data, dev->spareBuffer,
++					  NULL);
++		} else {
++			retval =
++			    mtd->read_ecc(mtd, addr, dev->nDataBytesPerChunk,
++					  &dummy, data, dev->spareBuffer,
++					  NULL);
++		}
++	} else {
++		if (data)
++			retval =
++			    mtd->read(mtd, addr, dev->nDataBytesPerChunk, &dummy,
++				      data);
++		if (tags)
++			retval =
++			    mtd->read_oob(mtd, addr, mtd->oobsize, &dummy,
++					  dev->spareBuffer);
++	}
++#endif
++
++	memcpy(&pt, dev->spareBuffer, sizeof(pt));
++
++	if (tags)
++		yaffs_UnpackTags2(tags, &pt);
++	
++	if(tags && retval == -EBADMSG && tags->eccResult == YAFFS_ECC_RESULT_NO_ERROR)
++		tags->eccResult = YAFFS_ECC_RESULT_UNFIXED;
++
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++}
++
++int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo)
++{
++	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++	int retval;
++	T(YAFFS_TRACE_MTD,
++	  (TSTR("nandmtd2_MarkNANDBlockBad %d" TENDSTR), blockNo));
++
++	retval =
++	    mtd->block_markbad(mtd,
++			       blockNo * dev->nChunksPerBlock *
++			       dev->nDataBytesPerChunk);
++
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++
++}
++
++int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
++			    yaffs_BlockState * state, int *sequenceNumber)
++{
++	struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
++	int retval;
++
++	T(YAFFS_TRACE_MTD,
++	  (TSTR("nandmtd2_QueryNANDBlock %d" TENDSTR), blockNo));
++	retval =
++	    mtd->block_isbad(mtd,
++			     blockNo * dev->nChunksPerBlock *
++			     dev->nDataBytesPerChunk);
++
++	if (retval) {
++		T(YAFFS_TRACE_MTD, (TSTR("block is bad" TENDSTR)));
++
++		*state = YAFFS_BLOCK_STATE_DEAD;
++		*sequenceNumber = 0;
++	} else {
++		yaffs_ExtendedTags t;
++		nandmtd2_ReadChunkWithTagsFromNAND(dev,
++						   blockNo *
++						   dev->nChunksPerBlock, NULL,
++						   &t);
++
++		if (t.chunkUsed) {
++			*sequenceNumber = t.sequenceNumber;
++			*state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
++		} else {
++			*sequenceNumber = 0;
++			*state = YAFFS_BLOCK_STATE_EMPTY;
++		}
++	}
++	T(YAFFS_TRACE_MTD,
++	  (TSTR("block query seq %d state %d" TENDSTR), *sequenceNumber,
++	   *state));
++
++	if (retval == 0)
++		return YAFFS_OK;
++	else
++		return YAFFS_FAIL;
++}
++
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_mtdif2.h linux-2.6.21.1.new/fs/yaffs2/yaffs_mtdif2.h
+--- linux-2.6.21.1/fs/yaffs2/yaffs_mtdif2.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_mtdif2.h	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,29 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_MTDIF2_H__
++#define __YAFFS_MTDIF2_H__
++
++#include "yaffs_guts.h"
++int nandmtd2_WriteChunkWithTagsToNAND(yaffs_Device * dev, int chunkInNAND,
++				      const __u8 * data,
++				      const yaffs_ExtendedTags * tags);
++int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
++				       __u8 * data, yaffs_ExtendedTags * tags);
++int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
++int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
++			    yaffs_BlockState * state, int *sequenceNumber);
++
++#endif
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_nand.c linux-2.6.21.1.new/fs/yaffs2/yaffs_nand.c
+--- linux-2.6.21.1/fs/yaffs2/yaffs_nand.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_nand.c	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,134 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++ 
++const char *yaffs_nand_c_version =
++    "$Id: yaffs_nand.c,v 1.7 2007-02-14 01:09:06 wookey Exp $";
++
++#include "yaffs_nand.h"
++#include "yaffs_tagscompat.h"
++#include "yaffs_tagsvalidity.h"
++
++
++int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
++					   __u8 * buffer,
++					   yaffs_ExtendedTags * tags)
++{
++	int result;
++	yaffs_ExtendedTags localTags;
++	
++	int realignedChunkInNAND = chunkInNAND - dev->chunkOffset;
++	
++	/* If there are no tags provided, use local tags to get prioritised gc working */
++	if(!tags)
++		tags = &localTags;
++
++	if (dev->readChunkWithTagsFromNAND)
++		result = dev->readChunkWithTagsFromNAND(dev, realignedChunkInNAND, buffer,
++						      tags);
++	else
++		result = yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(dev,
++									realignedChunkInNAND,
++									buffer,
++									tags);	
++	if(tags && 
++	   tags->eccResult > YAFFS_ECC_RESULT_NO_ERROR){
++	
++		yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, chunkInNAND/dev->nChunksPerBlock);
++                yaffs_HandleChunkError(dev,bi);
++	}
++								
++	return result;
++}
++
++int yaffs_WriteChunkWithTagsToNAND(yaffs_Device * dev,
++						   int chunkInNAND,
++						   const __u8 * buffer,
++						   yaffs_ExtendedTags * tags)
++{
++	chunkInNAND -= dev->chunkOffset;
++
++	
++	if (tags) {
++		tags->sequenceNumber = dev->sequenceNumber;
++		tags->chunkUsed = 1;
++		if (!yaffs_ValidateTags(tags)) {
++			T(YAFFS_TRACE_ERROR,
++			  (TSTR("Writing uninitialised tags" TENDSTR)));
++			YBUG();
++		}
++		T(YAFFS_TRACE_WRITE,
++		  (TSTR("Writing chunk %d tags %d %d" TENDSTR), chunkInNAND,
++		   tags->objectId, tags->chunkId));
++	} else {
++		T(YAFFS_TRACE_ERROR, (TSTR("Writing with no tags" TENDSTR)));
++		YBUG();
++	}
++
++	if (dev->writeChunkWithTagsToNAND)
++		return dev->writeChunkWithTagsToNAND(dev, chunkInNAND, buffer,
++						     tags);
++	else
++		return yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(dev,
++								       chunkInNAND,
++								       buffer,
++								       tags);
++}
++
++int yaffs_MarkBlockBad(yaffs_Device * dev, int blockNo)
++{
++	blockNo -= dev->blockOffset;
++
++	if (dev->markNANDBlockBad)
++		return dev->markNANDBlockBad(dev, blockNo);
++	else
++		return yaffs_TagsCompatabilityMarkNANDBlockBad(dev, blockNo);
++}
++
++int yaffs_QueryInitialBlockState(yaffs_Device * dev,
++						 int blockNo,
++						 yaffs_BlockState * state,
++						 unsigned *sequenceNumber)
++{
++	blockNo -= dev->blockOffset;
++
++	if (dev->queryNANDBlock)
++		return dev->queryNANDBlock(dev, blockNo, state, sequenceNumber);
++	else
++		return yaffs_TagsCompatabilityQueryNANDBlock(dev, blockNo,
++							     state,
++							     sequenceNumber);
++}
++
++
++int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
++				  int blockInNAND)
++{
++	int result;
++
++	blockInNAND -= dev->blockOffset;
++
++
++	dev->nBlockErasures++;
++	result = dev->eraseBlockInNAND(dev, blockInNAND);
++
++	return result;
++}
++
++int yaffs_InitialiseNAND(struct yaffs_DeviceStruct *dev)
++{
++	return dev->initialiseNAND(dev);
++}
++
++
++ 
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_nand.h linux-2.6.21.1.new/fs/yaffs2/yaffs_nand.h
+--- linux-2.6.21.1/fs/yaffs2/yaffs_nand.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_nand.h	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,44 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_NAND_H__
++#define __YAFFS_NAND_H__
++#include "yaffs_guts.h"
++
++
++
++int yaffs_ReadChunkWithTagsFromNAND(yaffs_Device * dev, int chunkInNAND,
++					   __u8 * buffer,
++					   yaffs_ExtendedTags * tags);
++
++int yaffs_WriteChunkWithTagsToNAND(yaffs_Device * dev,
++						   int chunkInNAND,
++						   const __u8 * buffer,
++						   yaffs_ExtendedTags * tags);
++
++int yaffs_MarkBlockBad(yaffs_Device * dev, int blockNo);
++
++int yaffs_QueryInitialBlockState(yaffs_Device * dev,
++						 int blockNo,
++						 yaffs_BlockState * state,
++						 unsigned *sequenceNumber);
++
++int yaffs_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
++				  int blockInNAND);
++
++int yaffs_InitialiseNAND(struct yaffs_DeviceStruct *dev);
++
++#endif
++
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_nandemul2k.h linux-2.6.21.1.new/fs/yaffs2/yaffs_nandemul2k.h
+--- linux-2.6.21.1/fs/yaffs2/yaffs_nandemul2k.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_nandemul2k.h	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,39 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/* Interface to emulated NAND functions (2k page size) */
++
++#ifndef __YAFFS_NANDEMUL2K_H__
++#define __YAFFS_NANDEMUL2K_H__
++
++#include "yaffs_guts.h"
++
++int nandemul2k_WriteChunkWithTagsToNAND(struct yaffs_DeviceStruct *dev,
++					int chunkInNAND, const __u8 * data,
++					yaffs_ExtendedTags * tags);
++int nandemul2k_ReadChunkWithTagsFromNAND(struct yaffs_DeviceStruct *dev,
++					 int chunkInNAND, __u8 * data,
++					 yaffs_ExtendedTags * tags);
++int nandemul2k_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
++int nandemul2k_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
++			      yaffs_BlockState * state, int *sequenceNumber);
++int nandemul2k_EraseBlockInNAND(struct yaffs_DeviceStruct *dev,
++				int blockInNAND);
++int nandemul2k_InitialiseNAND(struct yaffs_DeviceStruct *dev);
++int nandemul2k_GetBytesPerChunk(void);
++int nandemul2k_GetChunksPerBlock(void);
++int nandemul2k_GetNumberOfBlocks(void);
++
++#endif
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_packedtags1.c linux-2.6.21.1.new/fs/yaffs2/yaffs_packedtags1.c
+--- linux-2.6.21.1/fs/yaffs2/yaffs_packedtags1.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_packedtags1.c	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,52 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_packedtags1.h"
++#include "yportenv.h"
++
++void yaffs_PackTags1(yaffs_PackedTags1 * pt, const yaffs_ExtendedTags * t)
++{
++	pt->chunkId = t->chunkId;
++	pt->serialNumber = t->serialNumber;
++	pt->byteCount = t->byteCount;
++	pt->objectId = t->objectId;
++	pt->ecc = 0;
++	pt->deleted = (t->chunkDeleted) ? 0 : 1;
++	pt->unusedStuff = 0;
++	pt->shouldBeFF = 0xFFFFFFFF;
++
++}
++
++void yaffs_UnpackTags1(yaffs_ExtendedTags * t, const yaffs_PackedTags1 * pt)
++{
++	static const __u8 allFF[] =
++	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++0xff };
++
++	if (memcmp(allFF, pt, sizeof(yaffs_PackedTags1))) {
++		t->blockBad = 0;
++		if (pt->shouldBeFF != 0xFFFFFFFF) {
++			t->blockBad = 1;
++		}
++		t->chunkUsed = 1;
++		t->objectId = pt->objectId;
++		t->chunkId = pt->chunkId;
++		t->byteCount = pt->byteCount;
++		t->eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++		t->chunkDeleted = (pt->deleted) ? 0 : 1;
++		t->serialNumber = pt->serialNumber;
++	} else {
++		memset(t, 0, sizeof(yaffs_ExtendedTags));
++
++	}
++}
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_packedtags1.h linux-2.6.21.1.new/fs/yaffs2/yaffs_packedtags1.h
+--- linux-2.6.21.1/fs/yaffs2/yaffs_packedtags1.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_packedtags1.h	2007-06-08 14:07:27.000000000 +0200
+@@ -0,0 +1,37 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
++
++#ifndef __YAFFS_PACKEDTAGS1_H__
++#define __YAFFS_PACKEDTAGS1_H__
++
++#include "yaffs_guts.h"
++
++typedef struct {
++	unsigned chunkId:20;
++	unsigned serialNumber:2;
++	unsigned byteCount:10;
++	unsigned objectId:18;
++	unsigned ecc:12;
++	unsigned deleted:1;
++	unsigned unusedStuff:1;
++	unsigned shouldBeFF;
++
++} yaffs_PackedTags1;
++
++void yaffs_PackTags1(yaffs_PackedTags1 * pt, const yaffs_ExtendedTags * t);
++void yaffs_UnpackTags1(yaffs_ExtendedTags * t, const yaffs_PackedTags1 * pt);
++#endif
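On common ABIs where unsigned is 32 bits, the bitfields above pack into two words (20 + 2 + 10 = 32 bits and 18 + 12 + 1 + 1 = 32 bits); together with the 32-bit shouldBeFF word that gives the 12 bytes which yaffs_mtdif1.c asserts via compile_time_assertion(). A standalone, illustrative check follows; bitfield layout is compiler-dependent, so this is a sanity check rather than a guarantee.

#include <stdio.h>

/* Mirror of the yaffs_PackedTags1 layout, renamed so it stands alone. */
typedef struct {
	unsigned chunkId:20;
	unsigned serialNumber:2;
	unsigned byteCount:10;
	unsigned objectId:18;
	unsigned ecc:12;
	unsigned deleted:1;
	unsigned unusedStuff:1;
	unsigned shouldBeFF;
} example_PackedTags1;

int main(void)
{
	/* Expect 12 on typical ABIs with 32-bit 'unsigned'. */
	printf("sizeof(example_PackedTags1) = %zu\n", sizeof(example_PackedTags1));
	return 0;
}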
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_packedtags2.c linux-2.6.21.1.new/fs/yaffs2/yaffs_packedtags2.c
+--- linux-2.6.21.1/fs/yaffs2/yaffs_packedtags2.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_packedtags2.c	2007-06-08 14:07:27.000000000 +0200
+@@ -0,0 +1,182 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_packedtags2.h"
++#include "yportenv.h"
++#include "yaffs_tagsvalidity.h"
++
++/* This code packs a set of extended tags into a binary structure for
++ * NAND storage
++ */
++
++/* Some of the information is "extra" stuff which can be packed in to
++ * speed up scanning.
++ * This is indicated by having the EXTRA_HEADER_INFO_FLAG set.
++ */
++
++/* Extra flags applied to chunkId */
++
++#define EXTRA_HEADER_INFO_FLAG	0x80000000
++#define EXTRA_SHRINK_FLAG	0x40000000
++#define EXTRA_SHADOWS_FLAG	0x20000000
++#define EXTRA_SPARE_FLAGS	0x10000000
++
++#define ALL_EXTRA_FLAGS		0xF0000000
++
++/* Also, the top 4 bits of the object Id are set to the object type. */
++#define EXTRA_OBJECT_TYPE_SHIFT (28)
++#define EXTRA_OBJECT_TYPE_MASK  ((0x0F) << EXTRA_OBJECT_TYPE_SHIFT)
++
++static void yaffs_DumpPackedTags2(const yaffs_PackedTags2 * pt)
++{
++	T(YAFFS_TRACE_MTD,
++	  (TSTR("packed tags obj %d chunk %d byte %d seq %d" TENDSTR),
++	   pt->t.objectId, pt->t.chunkId, pt->t.byteCount,
++	   pt->t.sequenceNumber));
++}
++
++static void yaffs_DumpTags2(const yaffs_ExtendedTags * t)
++{
++	T(YAFFS_TRACE_MTD,
++	  (TSTR
++	   ("ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte "
++	    "%d del %d ser %d seq %d"
++	    TENDSTR), t->eccResult, t->blockBad, t->chunkUsed, t->objectId,
++	   t->chunkId, t->byteCount, t->chunkDeleted, t->serialNumber,
++	   t->sequenceNumber));
++
++}
++
++void yaffs_PackTags2(yaffs_PackedTags2 * pt, const yaffs_ExtendedTags * t)
++{
++	pt->t.chunkId = t->chunkId;
++	pt->t.sequenceNumber = t->sequenceNumber;
++	pt->t.byteCount = t->byteCount;
++	pt->t.objectId = t->objectId;
++
++	if (t->chunkId == 0 && t->extraHeaderInfoAvailable) {
++		/* Store the extra header info instead */
++		/* We save the parent object in the chunkId */
++		pt->t.chunkId = EXTRA_HEADER_INFO_FLAG
++			| t->extraParentObjectId;
++		if (t->extraIsShrinkHeader) {
++			pt->t.chunkId |= EXTRA_SHRINK_FLAG;
++		}
++		if (t->extraShadows) {
++			pt->t.chunkId |= EXTRA_SHADOWS_FLAG;
++		}
++
++		pt->t.objectId &= ~EXTRA_OBJECT_TYPE_MASK;
++		pt->t.objectId |=
++		    (t->extraObjectType << EXTRA_OBJECT_TYPE_SHIFT);
++
++		if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK) {
++			pt->t.byteCount = t->extraEquivalentObjectId;
++		} else if (t->extraObjectType == YAFFS_OBJECT_TYPE_FILE) {
++			pt->t.byteCount = t->extraFileLength;
++		} else {
++			pt->t.byteCount = 0;
++		}
++	}
++
++	yaffs_DumpPackedTags2(pt);
++	yaffs_DumpTags2(t);
++
++#ifndef YAFFS_IGNORE_TAGS_ECC
++	{
++		yaffs_ECCCalculateOther((unsigned char *)&pt->t,
++					sizeof(yaffs_PackedTags2TagsPart),
++					&pt->ecc);
++	}
++#endif
++}
++
++void yaffs_UnpackTags2(yaffs_ExtendedTags * t, yaffs_PackedTags2 * pt)
++{
++
++	memset(t, 0, sizeof(yaffs_ExtendedTags));
++
++	yaffs_InitialiseTags(t);
++
++	if (pt->t.sequenceNumber != 0xFFFFFFFF) {
++		/* Page is in use */
++#ifdef YAFFS_IGNORE_TAGS_ECC
++		{
++			t->eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++		}
++#else
++		{
++			yaffs_ECCOther ecc;
++			int result;
++			yaffs_ECCCalculateOther((unsigned char *)&pt->t,
++						sizeof
++						(yaffs_PackedTags2TagsPart),
++						&ecc);
++			result =
++			    yaffs_ECCCorrectOther((unsigned char *)&pt->t,
++						  sizeof
++						  (yaffs_PackedTags2TagsPart),
++						  &pt->ecc, &ecc);
++			switch(result){
++				case 0: 
++					t->eccResult = YAFFS_ECC_RESULT_NO_ERROR; 
++					break;
++				case 1: 
++					t->eccResult = YAFFS_ECC_RESULT_FIXED;
++					break;
++				case -1:
++					t->eccResult = YAFFS_ECC_RESULT_UNFIXED;
++					break;
++				default:
++					t->eccResult = YAFFS_ECC_RESULT_UNKNOWN;
++			}
++		}
++#endif
++		t->blockBad = 0;
++		t->chunkUsed = 1;
++		t->objectId = pt->t.objectId;
++		t->chunkId = pt->t.chunkId;
++		t->byteCount = pt->t.byteCount;
++		t->chunkDeleted = 0;
++		t->serialNumber = 0;
++		t->sequenceNumber = pt->t.sequenceNumber;
++
++		/* Do extra header info stuff */
++
++		if (pt->t.chunkId & EXTRA_HEADER_INFO_FLAG) {
++			t->chunkId = 0;
++			t->byteCount = 0;
++
++			t->extraHeaderInfoAvailable = 1;
++			t->extraParentObjectId =
++			    pt->t.chunkId & (~(ALL_EXTRA_FLAGS));
++			t->extraIsShrinkHeader =
++			    (pt->t.chunkId & EXTRA_SHRINK_FLAG) ? 1 : 0;
++			t->extraShadows =
++			    (pt->t.chunkId & EXTRA_SHADOWS_FLAG) ? 1 : 0;
++			t->extraObjectType =
++			    pt->t.objectId >> EXTRA_OBJECT_TYPE_SHIFT;
++			t->objectId &= ~EXTRA_OBJECT_TYPE_MASK;
++
++			if (t->extraObjectType == YAFFS_OBJECT_TYPE_HARDLINK) {
++				t->extraEquivalentObjectId = pt->t.byteCount;
++			} else {
++				t->extraFileLength = pt->t.byteCount;
++			}
++		}
++	}
++
++	yaffs_DumpPackedTags2(pt);
++	yaffs_DumpTags2(t);
++
++}
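The "extra header info" path above folds scanning hints into the two id fields: the flag bits occupy the top nibble of chunkId with the parent object id in the low 28 bits, and the object type is placed in the top 4 bits of objectId. The following standalone sketch walks through that arithmetic; the constants are copied from this file and the sample values are arbitrary.

#include <stdio.h>

#define EXTRA_HEADER_INFO_FLAG	0x80000000u
#define ALL_EXTRA_FLAGS		0xF0000000u
#define EXTRA_OBJECT_TYPE_SHIFT	28
#define EXTRA_OBJECT_TYPE_MASK	(0xFu << EXTRA_OBJECT_TYPE_SHIFT)

int main(void)
{
	unsigned parentObjectId = 0x1234;	/* must fit in the low 28 bits */
	unsigned objectType = 3;		/* arbitrary type code for the example */
	unsigned realObjectId = 42;

	/* pack, along the lines of yaffs_PackTags2() for object headers */
	unsigned chunkId = EXTRA_HEADER_INFO_FLAG | parentObjectId;
	unsigned objectId = (realObjectId & ~EXTRA_OBJECT_TYPE_MASK)
			  | (objectType << EXTRA_OBJECT_TYPE_SHIFT);

	/* unpack, along the lines of yaffs_UnpackTags2() */
	printf("extra header? %s\n", (chunkId & EXTRA_HEADER_INFO_FLAG) ? "yes" : "no");
	printf("parent=0x%x type=%u objectId=%u\n",
	       chunkId & ~ALL_EXTRA_FLAGS,
	       objectId >> EXTRA_OBJECT_TYPE_SHIFT,
	       objectId & ~EXTRA_OBJECT_TYPE_MASK);
	return 0;
}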
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_packedtags2.h linux-2.6.21.1.new/fs/yaffs2/yaffs_packedtags2.h
+--- linux-2.6.21.1/fs/yaffs2/yaffs_packedtags2.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_packedtags2.h	2007-06-08 14:07:27.000000000 +0200
+@@ -0,0 +1,38 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/* This is used to pack YAFFS2 tags, not YAFFS1 tags. */
++
++#ifndef __YAFFS_PACKEDTAGS2_H__
++#define __YAFFS_PACKEDTAGS2_H__
++
++#include "yaffs_guts.h"
++#include "yaffs_ecc.h"
++
++typedef struct {
++	unsigned sequenceNumber;
++	unsigned objectId;
++	unsigned chunkId;
++	unsigned byteCount;
++} yaffs_PackedTags2TagsPart;
++
++typedef struct {
++	yaffs_PackedTags2TagsPart t;
++	yaffs_ECCOther ecc;
++} yaffs_PackedTags2;
++
++void yaffs_PackTags2(yaffs_PackedTags2 * pt, const yaffs_ExtendedTags * t);
++void yaffs_UnpackTags2(yaffs_ExtendedTags * t, yaffs_PackedTags2 * pt);
++#endif
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_qsort.c linux-2.6.21.1.new/fs/yaffs2/yaffs_qsort.c
+--- linux-2.6.21.1/fs/yaffs2/yaffs_qsort.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_qsort.c	2007-06-08 14:07:27.000000000 +0200
+@@ -0,0 +1,160 @@
++/*
++ * Copyright (c) 1992, 1993
++ *	The Regents of the University of California.  All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ * 3. Neither the name of the University nor the names of its contributors
++ *    may be used to endorse or promote products derived from this software
++ *    without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ */
++
++#include "yportenv.h"
++//#include <linux/string.h>
++
++/*
++ * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
++ */
++#define swapcode(TYPE, parmi, parmj, n) { 		\
++	long i = (n) / sizeof (TYPE); 			\
++	register TYPE *pi = (TYPE *) (parmi); 		\
++	register TYPE *pj = (TYPE *) (parmj); 		\
++	do { 						\
++		register TYPE	t = *pi;		\
++		*pi++ = *pj;				\
++		*pj++ = t;				\
++        } while (--i > 0);				\
++}
++
++#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
++	es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1;
++
++static __inline void
++swapfunc(char *a, char *b, int n, int swaptype)
++{
++	if (swaptype <= 1) 
++		swapcode(long, a, b, n)
++	else
++		swapcode(char, a, b, n)
++}
++
++#define swap(a, b)					\
++	if (swaptype == 0) {				\
++		long t = *(long *)(a);			\
++		*(long *)(a) = *(long *)(b);		\
++		*(long *)(b) = t;			\
++	} else						\
++		swapfunc(a, b, es, swaptype)
++
++#define vecswap(a, b, n) 	if ((n) > 0) swapfunc(a, b, n, swaptype)
++
++static __inline char *
++med3(char *a, char *b, char *c, int (*cmp)(const void *, const void *))
++{
++	return cmp(a, b) < 0 ?
++	       (cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a ))
++              :(cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? a : c ));
++}
++
++#ifndef min
++#define min(a,b) (((a) < (b)) ? (a) : (b))
++#endif
++
++void
++yaffs_qsort(void *aa, size_t n, size_t es,
++	int (*cmp)(const void *, const void *))
++{
++	char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
++	int d, r, swaptype, swap_cnt;
++	register char *a = aa;
++
++loop:	SWAPINIT(a, es);
++	swap_cnt = 0;
++	if (n < 7) {
++		for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es)
++			for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0;
++			     pl -= es)
++				swap(pl, pl - es);
++		return;
++	}
++	pm = (char *)a + (n / 2) * es;
++	if (n > 7) {
++		pl = (char *)a;
++		pn = (char *)a + (n - 1) * es;
++		if (n > 40) {
++			d = (n / 8) * es;
++			pl = med3(pl, pl + d, pl + 2 * d, cmp);
++			pm = med3(pm - d, pm, pm + d, cmp);
++			pn = med3(pn - 2 * d, pn - d, pn, cmp);
++		}
++		pm = med3(pl, pm, pn, cmp);
++	}
++	swap(a, pm);
++	pa = pb = (char *)a + es;
++
++	pc = pd = (char *)a + (n - 1) * es;
++	for (;;) {
++		while (pb <= pc && (r = cmp(pb, a)) <= 0) {
++			if (r == 0) {
++				swap_cnt = 1;
++				swap(pa, pb);
++				pa += es;
++			}
++			pb += es;
++		}
++		while (pb <= pc && (r = cmp(pc, a)) >= 0) {
++			if (r == 0) {
++				swap_cnt = 1;
++				swap(pc, pd);
++				pd -= es;
++			}
++			pc -= es;
++		}
++		if (pb > pc)
++			break;
++		swap(pb, pc);
++		swap_cnt = 1;
++		pb += es;
++		pc -= es;
++	}
++	if (swap_cnt == 0) {  /* Switch to insertion sort */
++		for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es)
++			for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0; 
++			     pl -= es)
++				swap(pl, pl - es);
++		return;
++	}
++
++	pn = (char *)a + n * es;
++	r = min(pa - (char *)a, pb - pa);
++	vecswap(a, pb - r, r);
++	r = min((long)(pd - pc), (long)(pn - pd - es));
++	vecswap(pb, pn - r, r);
++	if ((r = pb - pa) > es)
++		yaffs_qsort(a, r / es, es, cmp);
++	if ((r = pd - pc) > es) { 
++		/* Iterate rather than recurse to save stack space */
++		a = pn - r;
++		n = r / es;
++		goto loop;
++	}
++/*		yaffs_qsort(pn - r, r / es, es, cmp);*/
++}
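A short usage sketch for yaffs_qsort(): it takes the element count and element size separately plus a qsort-style comparator. This example assumes it is compiled and linked together with yaffs_qsort.c above; the prototype matches the declaration in yaffs_qsort.h below.

#include <stdio.h>
#include <stddef.h>

extern void yaffs_qsort(void *const base, size_t total_elems, size_t size,
			int (*cmp)(const void *, const void *));

/* Comparator returning <0, 0 or >0 without overflow. */
static int cmp_int(const void *a, const void *b)
{
	int ia = *(const int *)a;
	int ib = *(const int *)b;
	return (ia > ib) - (ia < ib);
}

int main(void)
{
	int v[] = { 42, 7, 19, 3, 25 };
	size_t i, n = sizeof(v) / sizeof(v[0]);

	yaffs_qsort(v, n, sizeof(v[0]), cmp_int);
	for (i = 0; i < n; i++)
		printf("%d ", v[i]);
	printf("\n");
	return 0;
}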
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_qsort.h linux-2.6.21.1.new/fs/yaffs2/yaffs_qsort.h
+--- linux-2.6.21.1/fs/yaffs2/yaffs_qsort.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_qsort.h	2007-06-08 14:07:27.000000000 +0200
+@@ -0,0 +1,23 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++
++#ifndef __YAFFS_QSORT_H__
++#define __YAFFS_QSORT_H__
++
++extern void yaffs_qsort (void *const base, size_t total_elems, size_t size,
++                   int (*cmp)(const void *, const void *));
++
++#endif
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_tagscompat.c linux-2.6.21.1.new/fs/yaffs2/yaffs_tagscompat.c
+--- linux-2.6.21.1/fs/yaffs2/yaffs_tagscompat.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_tagscompat.c	2007-06-08 14:07:27.000000000 +0200
+@@ -0,0 +1,530 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_guts.h"
++#include "yaffs_tagscompat.h"
++#include "yaffs_ecc.h"
++
++static void yaffs_HandleReadDataError(yaffs_Device * dev, int chunkInNAND);
++#ifdef NOTYET
++static void yaffs_CheckWrittenBlock(yaffs_Device * dev, int chunkInNAND);
++static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
++				     const __u8 * data,
++				     const yaffs_Spare * spare);
++static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
++				    const yaffs_Spare * spare);
++static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND);
++#endif
++
++static const char yaffs_countBitsTable[256] = {
++	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
++	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
++	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
++	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
++	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
++	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
++	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
++	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
++	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
++	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
++	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
++	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
++	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
++	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
++	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
++	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
++};
++
++int yaffs_CountBits(__u8 x)
++{
++	int retVal;
++	retVal = yaffs_countBitsTable[x];
++	return retVal;
++}
++
++/********** Tags ECC calculations  *********/
++
++void yaffs_CalcECC(const __u8 * data, yaffs_Spare * spare)
++{
++	yaffs_ECCCalculate(data, spare->ecc1);
++	yaffs_ECCCalculate(&data[256], spare->ecc2);
++}
++
++void yaffs_CalcTagsECC(yaffs_Tags * tags)
++{
++	/* Calculate an ecc */
++
++	unsigned char *b = ((yaffs_TagsUnion *) tags)->asBytes;
++	unsigned i, j;
++	unsigned ecc = 0;
++	unsigned bit = 0;
++
++	tags->ecc = 0;
++
++	for (i = 0; i < 8; i++) {
++		for (j = 1; j & 0xff; j <<= 1) {
++			bit++;
++			if (b[i] & j) {
++				ecc ^= bit;
++			}
++		}
++	}
++
++	tags->ecc = ecc;
++
++}
++
++int yaffs_CheckECCOnTags(yaffs_Tags * tags)
++{
++	unsigned ecc = tags->ecc;
++
++	yaffs_CalcTagsECC(tags);
++
++	ecc ^= tags->ecc;
++
++	if (ecc && ecc <= 64) {
++		/* TODO: Handle the failure better. Retire? */
++		unsigned char *b = ((yaffs_TagsUnion *) tags)->asBytes;
++
++		ecc--;
++
++		b[ecc / 8] ^= (1 << (ecc & 7));
++
++		/* Now recalculate the ecc */
++		yaffs_CalcTagsECC(tags);
++
++		return 1;	/* recovered error */
++	} else if (ecc) {
++		/* Weird ecc failure value */
++		/* TODO Need to do something here */
++		return -1;	/* unrecovered error */
++	}
++
++	return 0;
++}
++
++/********** Tags **********/
++
++static void yaffs_LoadTagsIntoSpare(yaffs_Spare * sparePtr,
++				    yaffs_Tags * tagsPtr)
++{
++	yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr;
++
++	yaffs_CalcTagsECC(tagsPtr);
++
++	sparePtr->tagByte0 = tu->asBytes[0];
++	sparePtr->tagByte1 = tu->asBytes[1];
++	sparePtr->tagByte2 = tu->asBytes[2];
++	sparePtr->tagByte3 = tu->asBytes[3];
++	sparePtr->tagByte4 = tu->asBytes[4];
++	sparePtr->tagByte5 = tu->asBytes[5];
++	sparePtr->tagByte6 = tu->asBytes[6];
++	sparePtr->tagByte7 = tu->asBytes[7];
++}
++
++static void yaffs_GetTagsFromSpare(yaffs_Device * dev, yaffs_Spare * sparePtr,
++				   yaffs_Tags * tagsPtr)
++{
++	yaffs_TagsUnion *tu = (yaffs_TagsUnion *) tagsPtr;
++	int result;
++
++	tu->asBytes[0] = sparePtr->tagByte0;
++	tu->asBytes[1] = sparePtr->tagByte1;
++	tu->asBytes[2] = sparePtr->tagByte2;
++	tu->asBytes[3] = sparePtr->tagByte3;
++	tu->asBytes[4] = sparePtr->tagByte4;
++	tu->asBytes[5] = sparePtr->tagByte5;
++	tu->asBytes[6] = sparePtr->tagByte6;
++	tu->asBytes[7] = sparePtr->tagByte7;
++
++	result = yaffs_CheckECCOnTags(tagsPtr);
++	if (result > 0) {
++		dev->tagsEccFixed++;
++	} else if (result < 0) {
++		dev->tagsEccUnfixed++;
++	}
++}
++
++static void yaffs_SpareInitialise(yaffs_Spare * spare)
++{
++	memset(spare, 0xFF, sizeof(yaffs_Spare));
++}
++
++static int yaffs_WriteChunkToNAND(struct yaffs_DeviceStruct *dev,
++				  int chunkInNAND, const __u8 * data,
++				  yaffs_Spare * spare)
++{
++	if (chunkInNAND < dev->startBlock * dev->nChunksPerBlock) {
++		T(YAFFS_TRACE_ERROR,
++		  (TSTR("**>> yaffs chunk %d is not valid" TENDSTR),
++		   chunkInNAND));
++		return YAFFS_FAIL;
++	}
++
++	dev->nPageWrites++;
++	return dev->writeChunkToNAND(dev, chunkInNAND, data, spare);
++}
++
++static int yaffs_ReadChunkFromNAND(struct yaffs_DeviceStruct *dev,
++				   int chunkInNAND,
++				   __u8 * data,
++				   yaffs_Spare * spare,
++				   yaffs_ECCResult * eccResult,
++				   int doErrorCorrection)
++{
++	int retVal;
++	yaffs_Spare localSpare;
++
++	dev->nPageReads++;
++
++	if (!spare && data) {
++		/* If we don't have a real spare, then we use a local one. */
++		/* Need this for the calculation of the ecc */
++		spare = &localSpare;
++	}
++
++	if (!dev->useNANDECC) {
++		retVal = dev->readChunkFromNAND(dev, chunkInNAND, data, spare);
++		if (data && doErrorCorrection) {
++			/* Do ECC correction */
++			/* Todo handle any errors */
++			int eccResult1, eccResult2;
++			__u8 calcEcc[3];
++
++			yaffs_ECCCalculate(data, calcEcc);
++			eccResult1 =
++			    yaffs_ECCCorrect(data, spare->ecc1, calcEcc);
++			yaffs_ECCCalculate(&data[256], calcEcc);
++			eccResult2 =
++			    yaffs_ECCCorrect(&data[256], spare->ecc2, calcEcc);
++
++			if (eccResult1 > 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>yaffs ecc error fix performed on chunk %d:0"
++				    TENDSTR), chunkInNAND));
++				dev->eccFixed++;
++			} else if (eccResult1 < 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>yaffs ecc error unfixed on chunk %d:0"
++				    TENDSTR), chunkInNAND));
++				dev->eccUnfixed++;
++			}
++
++			if (eccResult2 > 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>yaffs ecc error fix performed on chunk %d:1"
++				    TENDSTR), chunkInNAND));
++				dev->eccFixed++;
++			} else if (eccResult2 < 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>yaffs ecc error unfixed on chunk %d:1"
++				    TENDSTR), chunkInNAND));
++				dev->eccUnfixed++;
++			}
++
++			if (eccResult1 || eccResult2) {
++				/* We had a data problem on this page */
++				yaffs_HandleReadDataError(dev, chunkInNAND);
++			}
++
++			if (eccResult1 < 0 || eccResult2 < 0)
++				*eccResult = YAFFS_ECC_RESULT_UNFIXED;
++			else if (eccResult1 > 0 || eccResult2 > 0)
++				*eccResult = YAFFS_ECC_RESULT_FIXED;
++			else
++				*eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++		}
++	} else {
++		/* Must allocate enough memory for spare+2*sizeof(int) */
++		/* for ecc results from device. */
++		struct yaffs_NANDSpare nspare;
++		retVal =
++		    dev->readChunkFromNAND(dev, chunkInNAND, data,
++					   (yaffs_Spare *) & nspare);
++		memcpy(spare, &nspare, sizeof(yaffs_Spare));
++		if (data && doErrorCorrection) {
++			if (nspare.eccres1 > 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>mtd ecc error fix performed on chunk %d:0"
++				    TENDSTR), chunkInNAND));
++			} else if (nspare.eccres1 < 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>mtd ecc error unfixed on chunk %d:0"
++				    TENDSTR), chunkInNAND));
++			}
++
++			if (nspare.eccres2 > 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>mtd ecc error fix performed on chunk %d:1"
++				    TENDSTR), chunkInNAND));
++			} else if (nspare.eccres2 < 0) {
++				T(YAFFS_TRACE_ERROR,
++				  (TSTR
++				   ("**>>mtd ecc error unfixed on chunk %d:1"
++				    TENDSTR), chunkInNAND));
++			}
++
++			if (nspare.eccres1 || nspare.eccres2) {
++				/* We had a data problem on this page */
++				yaffs_HandleReadDataError(dev, chunkInNAND);
++			}
++
++			if (nspare.eccres1 < 0 || nspare.eccres2 < 0)
++				*eccResult = YAFFS_ECC_RESULT_UNFIXED;
++			else if (nspare.eccres1 > 0 || nspare.eccres2 > 0)
++				*eccResult = YAFFS_ECC_RESULT_FIXED;
++			else
++				*eccResult = YAFFS_ECC_RESULT_NO_ERROR;
++
++		}
++	}
++	return retVal;
++}
++
++#ifdef NOTYET
++static int yaffs_CheckChunkErased(struct yaffs_DeviceStruct *dev,
++				  int chunkInNAND)
++{
++
++	static int init = 0;
++	static __u8 cmpbuf[YAFFS_BYTES_PER_CHUNK];
++	static __u8 data[YAFFS_BYTES_PER_CHUNK];
++	/* Might as well always allocate the larger size for */
++	/* dev->useNANDECC == true; */
++	static __u8 spare[sizeof(struct yaffs_NANDSpare)];
++
++	dev->readChunkFromNAND(dev, chunkInNAND, data, (yaffs_Spare *) spare);
++
++	if (!init) {
++		memset(cmpbuf, 0xff, YAFFS_BYTES_PER_CHUNK);
++		init = 1;
++	}
++
++	if (memcmp(cmpbuf, data, YAFFS_BYTES_PER_CHUNK))
++		return YAFFS_FAIL;
++	if (memcmp(cmpbuf, spare, 16))
++		return YAFFS_FAIL;
++
++	return YAFFS_OK;
++
++}
++#endif
++
++/*
++ * Functions for robustisizing (error handling and block retirement)
++ */
++
++static void yaffs_HandleReadDataError(yaffs_Device * dev, int chunkInNAND)
++{
++	int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
++
++	/* Mark the block for retirement */
++	yaffs_GetBlockInfo(dev, blockInNAND)->needsRetiring = 1;
++	T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++	  (TSTR("**>>Block %d marked for retirement" TENDSTR), blockInNAND));
++
++	/* TODO:
++	 * Just do a garbage collection on the affected block
++	 * then retire the block
++	 * NB recursion
++	 */
++}
++
++#ifdef NOTYET
++static void yaffs_CheckWrittenBlock(yaffs_Device * dev, int chunkInNAND)
++{
++}
++
++static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
++				     const __u8 * data,
++				     const yaffs_Spare * spare)
++{
++}
++
++static void yaffs_HandleUpdateChunk(yaffs_Device * dev, int chunkInNAND,
++				    const yaffs_Spare * spare)
++{
++}
++
++static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND)
++{
++	int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
++
++	/* Mark the block for retirement */
++	yaffs_GetBlockInfo(dev, blockInNAND)->needsRetiring = 1;
++	/* Delete the chunk */
++	yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
++}
++
++static int yaffs_VerifyCompare(const __u8 * d0, const __u8 * d1,
++			       const yaffs_Spare * s0, const yaffs_Spare * s1)
++{
++
++	if (memcmp(d0, d1, YAFFS_BYTES_PER_CHUNK) != 0 ||
++	    s0->tagByte0 != s1->tagByte0 ||
++	    s0->tagByte1 != s1->tagByte1 ||
++	    s0->tagByte2 != s1->tagByte2 ||
++	    s0->tagByte3 != s1->tagByte3 ||
++	    s0->tagByte4 != s1->tagByte4 ||
++	    s0->tagByte5 != s1->tagByte5 ||
++	    s0->tagByte6 != s1->tagByte6 ||
++	    s0->tagByte7 != s1->tagByte7 ||
++	    s0->ecc1[0] != s1->ecc1[0] ||
++	    s0->ecc1[1] != s1->ecc1[1] ||
++	    s0->ecc1[2] != s1->ecc1[2] ||
++	    s0->ecc2[0] != s1->ecc2[0] ||
++	    s0->ecc2[1] != s1->ecc2[1] || s0->ecc2[2] != s1->ecc2[2]) {
++		return 0;
++	}
++
++	return 1;
++}
++#endif				/* NOTYET */
++
++int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device * dev,
++						    int chunkInNAND,
++						    const __u8 * data,
++						    const yaffs_ExtendedTags *
++						    eTags)
++{
++	yaffs_Spare spare;
++	yaffs_Tags tags;
++
++	yaffs_SpareInitialise(&spare);
++
++	if (eTags->chunkDeleted) {
++		spare.pageStatus = 0;
++	} else {
++		tags.objectId = eTags->objectId;
++		tags.chunkId = eTags->chunkId;
++		tags.byteCount = eTags->byteCount;
++		tags.serialNumber = eTags->serialNumber;
++
++		if (!dev->useNANDECC && data) {
++			yaffs_CalcECC(data, &spare);
++		}
++		yaffs_LoadTagsIntoSpare(&spare, &tags);
++
++	}
++
++	return yaffs_WriteChunkToNAND(dev, chunkInNAND, data, &spare);
++}
++
++int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device * dev,
++						     int chunkInNAND,
++						     __u8 * data,
++						     yaffs_ExtendedTags * eTags)
++{
++
++	yaffs_Spare spare;
++	yaffs_Tags tags;
++	yaffs_ECCResult eccResult;
++
++	static yaffs_Spare spareFF;
++	static int init;
++
++	if (!init) {
++		memset(&spareFF, 0xFF, sizeof(spareFF));
++		init = 1;
++	}
++
++	if (yaffs_ReadChunkFromNAND
++	    (dev, chunkInNAND, data, &spare, &eccResult, 1)) {
++		/* eTags may be NULL */
++		if (eTags) {
++
++			int deleted =
++			    (yaffs_CountBits(spare.pageStatus) < 7) ? 1 : 0;
++
++			eTags->chunkDeleted = deleted;
++			eTags->eccResult = eccResult;
++			eTags->blockBad = 0;	/* We're reading it */
++			/* therefore it is not a bad block */
++			eTags->chunkUsed =
++			    (memcmp(&spareFF, &spare, sizeof(spareFF)) !=
++			     0) ? 1 : 0;
++
++			if (eTags->chunkUsed) {
++				yaffs_GetTagsFromSpare(dev, &spare, &tags);
++
++				eTags->objectId = tags.objectId;
++				eTags->chunkId = tags.chunkId;
++				eTags->byteCount = tags.byteCount;
++				eTags->serialNumber = tags.serialNumber;
++			}
++		}
++
++		return YAFFS_OK;
++	} else {
++		return YAFFS_FAIL;
++	}
++}
++
++int yaffs_TagsCompatabilityMarkNANDBlockBad(struct yaffs_DeviceStruct *dev,
++					    int blockInNAND)
++{
++
++	yaffs_Spare spare;
++
++	memset(&spare, 0xff, sizeof(yaffs_Spare));
++
++	spare.blockStatus = 'Y';
++
++	yaffs_WriteChunkToNAND(dev, blockInNAND * dev->nChunksPerBlock, NULL,
++			       &spare);
++	yaffs_WriteChunkToNAND(dev, blockInNAND * dev->nChunksPerBlock + 1,
++			       NULL, &spare);
++
++	return YAFFS_OK;
++
++}
++
++int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev,
++					  int blockNo, yaffs_BlockState *
++					  state,
++					  int *sequenceNumber)
++{
++
++	yaffs_Spare spare0, spare1;
++	static yaffs_Spare spareFF;
++	static int init;
++	yaffs_ECCResult dummy;
++
++	if (!init) {
++		memset(&spareFF, 0xFF, sizeof(spareFF));
++		init = 1;
++	}
++
++	*sequenceNumber = 0;
++
++	yaffs_ReadChunkFromNAND(dev, blockNo * dev->nChunksPerBlock, NULL,
++				&spare0, &dummy, 1);
++	yaffs_ReadChunkFromNAND(dev, blockNo * dev->nChunksPerBlock + 1, NULL,
++				&spare1, &dummy, 1);
++
++	if (yaffs_CountBits(spare0.blockStatus & spare1.blockStatus) < 7)
++		*state = YAFFS_BLOCK_STATE_DEAD;
++	else if (memcmp(&spareFF, &spare0, sizeof(spareFF)) == 0)
++		*state = YAFFS_BLOCK_STATE_EMPTY;
++	else
++		*state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
++
++	return YAFFS_OK;
++}
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_tagscompat.h linux-2.6.21.1.new/fs/yaffs2/yaffs_tagscompat.h
+--- linux-2.6.21.1/fs/yaffs2/yaffs_tagscompat.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_tagscompat.h	2007-06-08 14:07:27.000000000 +0200
+@@ -0,0 +1,40 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_TAGSCOMPAT_H__
++#define __YAFFS_TAGSCOMPAT_H__
++
++#include "yaffs_guts.h"
++int yaffs_TagsCompatabilityWriteChunkWithTagsToNAND(yaffs_Device * dev,
++						    int chunkInNAND,
++						    const __u8 * data,
++						    const yaffs_ExtendedTags *
++						    tags);
++int yaffs_TagsCompatabilityReadChunkWithTagsFromNAND(yaffs_Device * dev,
++						     int chunkInNAND,
++						     __u8 * data,
++						     yaffs_ExtendedTags *
++						     tags);
++int yaffs_TagsCompatabilityMarkNANDBlockBad(struct yaffs_DeviceStruct *dev,
++					    int blockNo);
++int yaffs_TagsCompatabilityQueryNANDBlock(struct yaffs_DeviceStruct *dev,
++					  int blockNo, yaffs_BlockState *
++					  state, int *sequenceNumber);
++
++void yaffs_CalcTagsECC(yaffs_Tags * tags);
++int yaffs_CheckECCOnTags(yaffs_Tags * tags);
++int yaffs_CountBits(__u8 byte);
++
++#endif
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_tagsvalidity.c linux-2.6.21.1.new/fs/yaffs2/yaffs_tagsvalidity.c
+--- linux-2.6.21.1/fs/yaffs2/yaffs_tagsvalidity.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_tagsvalidity.c	2007-06-08 14:07:27.000000000 +0200
+@@ -0,0 +1,28 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_tagsvalidity.h"
++
++void yaffs_InitialiseTags(yaffs_ExtendedTags * tags)
++{
++	memset(tags, 0, sizeof(yaffs_ExtendedTags));
++	tags->validMarker0 = 0xAAAAAAAA;
++	tags->validMarker1 = 0x55555555;
++}
++
++int yaffs_ValidateTags(yaffs_ExtendedTags * tags)
++{
++	return (tags->validMarker0 == 0xAAAAAAAA &&
++		tags->validMarker1 == 0x55555555);
++
++}
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffs_tagsvalidity.h linux-2.6.21.1.new/fs/yaffs2/yaffs_tagsvalidity.h
+--- linux-2.6.21.1/fs/yaffs2/yaffs_tagsvalidity.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffs_tagsvalidity.h	2007-06-08 14:07:27.000000000 +0200
+@@ -0,0 +1,24 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++
++#ifndef __YAFFS_TAGS_VALIDITY_H__
++#define __YAFFS_TAGS_VALIDITY_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_InitialiseTags(yaffs_ExtendedTags * tags);
++int yaffs_ValidateTags(yaffs_ExtendedTags * tags);
++#endif
+diff -urN linux-2.6.21.1/fs/yaffs2/yaffsinterface.h linux-2.6.21.1.new/fs/yaffs2/yaffsinterface.h
+--- linux-2.6.21.1/fs/yaffs2/yaffsinterface.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yaffsinterface.h	2007-06-08 14:07:26.000000000 +0200
+@@ -0,0 +1,21 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFSINTERFACE_H__
++#define __YAFFSINTERFACE_H__
++
++int yaffs_Initialise(unsigned nBlocks);
++
++#endif
+diff -urN linux-2.6.21.1/fs/yaffs2/yportenv.h linux-2.6.21.1.new/fs/yaffs2/yportenv.h
+--- linux-2.6.21.1/fs/yaffs2/yportenv.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.1.new/fs/yaffs2/yportenv.h	2007-06-08 14:07:27.000000000 +0200
+@@ -0,0 +1,186 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2007 Aleph One Ltd.
++ *   for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++
++#ifndef __YPORTENV_H__
++#define __YPORTENV_H__
++
++#if defined CONFIG_YAFFS_WINCE
++
++#include "ywinceenv.h"
++
++#elif  defined __KERNEL__
++
++#include "moduleconfig.h"
++
++/* Linux kernel */
++#include <linux/version.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++#include <linux/config.h>
++#endif
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++
++#define YCHAR char
++#define YUCHAR unsigned char
++#define _Y(x)     x
++#define yaffs_strcpy(a,b)    strcpy(a,b)
++#define yaffs_strncpy(a,b,c) strncpy(a,b,c)
++#define yaffs_strncmp(a,b,c) strncmp(a,b,c)
++#define yaffs_strlen(s)	     strlen(s)
++#define yaffs_sprintf	     sprintf
++#define yaffs_toupper(a)     toupper(a)
++
++#define Y_INLINE inline
++
++#define YAFFS_LOSTNFOUND_NAME		"lost+found"
++#define YAFFS_LOSTNFOUND_PREFIX		"obj"
++
++/* #define YPRINTF(x) printk x */
++#define YMALLOC(x) kmalloc(x,GFP_KERNEL)
++#define YFREE(x)   kfree(x)
++#define YMALLOC_ALT(x) vmalloc(x)
++#define YFREE_ALT(x)   vfree(x)
++#define YMALLOC_DMA(x) YMALLOC(x)
++
++/* KR - added for use in scan so processes aren't blocked indefinitely. */
++#define YYIELD() schedule()
++
++#define YAFFS_ROOT_MODE			0666
++#define YAFFS_LOSTNFOUND_MODE		0666
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
++#define Y_CURRENT_TIME CURRENT_TIME.tv_sec
++#define Y_TIME_CONVERT(x) (x).tv_sec
++#else
++#define Y_CURRENT_TIME CURRENT_TIME
++#define Y_TIME_CONVERT(x) (x)
++#endif
++
++#define yaffs_SumCompare(x,y) ((x) == (y))
++#define yaffs_strcmp(a,b) strcmp(a,b)
++
++#define TENDSTR "\n"
++#define TSTR(x) KERN_WARNING x
++#define TOUT(p) printk p
++
++#define yaffs_trace(mask, fmt, args...) \
++	do { if ((mask) & (yaffs_traceMask|YAFFS_TRACE_ERROR)) \
++		printk(KERN_WARNING "yaffs: " fmt, ## args); \
++	} while (0)
++
++#define compile_time_assertion(assertion) \
++	({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
++
++#elif defined CONFIG_YAFFS_DIRECT
++
++/* Direct interface */
++#include "ydirectenv.h"
++
++#elif defined CONFIG_YAFFS_UTIL
++
++/* Stuff for YAFFS utilities */
++
++#include "stdlib.h"
++#include "stdio.h"
++#include "string.h"
++
++#include "devextras.h"
++
++#define YMALLOC(x) malloc(x)
++#define YFREE(x)   free(x)
++#define YMALLOC_ALT(x) malloc(x)
++#define YFREE_ALT(x) free(x)
++
++#define YCHAR char
++#define YUCHAR unsigned char
++#define _Y(x)     x
++#define yaffs_strcpy(a,b)    strcpy(a,b)
++#define yaffs_strncpy(a,b,c) strncpy(a,b,c)
++#define yaffs_strlen(s)	     strlen(s)
++#define yaffs_sprintf	     sprintf
++#define yaffs_toupper(a)     toupper(a)
++
++#define Y_INLINE inline
++
++/* #define YINFO(s) YPRINTF(( __FILE__ " %d %s\n",__LINE__,s)) */
++/* #define YALERT(s) YINFO(s) */
++
++#define TENDSTR "\n"
++#define TSTR(x) x
++#define TOUT(p) printf p
++
++#define YAFFS_LOSTNFOUND_NAME		"lost+found"
++#define YAFFS_LOSTNFOUND_PREFIX		"obj"
++/* #define YPRINTF(x) printf x */
++
++#define YAFFS_ROOT_MODE				0666
++#define YAFFS_LOSTNFOUND_MODE		0666
++
++#define yaffs_SumCompare(x,y) ((x) == (y))
++#define yaffs_strcmp(a,b) strcmp(a,b)
++
++#else
++/* Should have specified a configuration type */
++#error Unknown configuration
++
++#endif
++
++/* see yaffs_fs.c */
++extern unsigned int yaffs_traceMask;
++extern unsigned int yaffs_wr_attempts;
++
++/*
++ * Tracing flags.
++ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
++ */
++ 
++#define YAFFS_TRACE_OS			0x00000002
++#define YAFFS_TRACE_ALLOCATE		0x00000004
++#define YAFFS_TRACE_SCAN		0x00000008
++#define YAFFS_TRACE_BAD_BLOCKS		0x00000010
++#define YAFFS_TRACE_ERASE		0x00000020
++#define YAFFS_TRACE_GC			0x00000040
++#define YAFFS_TRACE_WRITE		0x00000080
++#define YAFFS_TRACE_TRACING		0x00000100
++#define YAFFS_TRACE_DELETION		0x00000200
++#define YAFFS_TRACE_BUFFERS		0x00000400
++#define YAFFS_TRACE_NANDACCESS		0x00000800
++#define YAFFS_TRACE_GC_DETAIL		0x00001000
++#define YAFFS_TRACE_SCAN_DEBUG		0x00002000
++#define YAFFS_TRACE_MTD			0x00004000
++#define YAFFS_TRACE_CHECKPOINT		0x00008000
++
++#define YAFFS_TRACE_VERIFY		0x00010000
++#define YAFFS_TRACE_VERIFY_NAND		0x00020000
++#define YAFFS_TRACE_VERIFY_FULL		0x00040000
++#define YAFFS_TRACE_VERIFY_ALL		0x000F0000
++
++
++#define YAFFS_TRACE_ERROR		0x40000000
++#define YAFFS_TRACE_BUG			0x80000000
++#define YAFFS_TRACE_ALWAYS		0xF0000000
++
++
++#define T(mask,p) do{ if((mask) & (yaffs_traceMask | YAFFS_TRACE_ALWAYS)) TOUT(p);} while(0)
++
++#ifndef CONFIG_YAFFS_WINCE
++#define YBUG() T(YAFFS_TRACE_BUG,(TSTR("==>> yaffs bug: " __FILE__ " %d" TENDSTR),__LINE__))
++#endif
++
++#endif
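
The tags ECC implemented by yaffs_CalcTagsECC()/yaffs_CheckECCOnTags() above XORs together the 1-based positions of every set bit in the eight tag bytes, so a single flipped bit leaves exactly that bit's position in stored_ecc ^ recalculated_ecc, which can then be flipped back. A minimal standalone sketch of that recovery step (not part of the patch; the helper name and sample bytes are illustrative only):

#include <stdio.h>

/* XOR of the 1-based bit positions of all set bits, as in yaffs_CalcTagsECC() */
static unsigned calc_ecc(const unsigned char *b, unsigned len)
{
	unsigned ecc = 0, bit = 0, i, j;

	for (i = 0; i < len; i++)
		for (j = 1; j & 0xff; j <<= 1) {
			bit++;
			if (b[i] & j)
				ecc ^= bit;
		}
	return ecc;
}

int main(void)
{
	unsigned char tags[8] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 };
	unsigned stored = calc_ecc(tags, 8);
	unsigned delta;

	tags[3] ^= 1 << 5;			/* simulate a single-bit flash error */

	delta = stored ^ calc_ecc(tags, 8);
	if (delta && delta <= 64) {		/* same recovery test as yaffs_CheckECCOnTags() */
		delta--;
		tags[delta / 8] ^= 1 << (delta & 7);	/* flip the corrupted bit back */
		printf("recovered error at byte %u, bit %u\n", delta / 8, delta & 7);
	}
	return 0;
}

Note that the real code keeps the ecc value inside the tags union and zeroes it before recalculating; the sketch holds it in a separate variable for brevity.
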
diff --git a/target/linux/generic-2.6/patches-2.6.22/900-headers_type_and_time.patch b/target/linux/generic-2.6/patches-2.6.22/900-headers_type_and_time.patch
new file mode 100644
index 0000000000..7eedc234ee
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/900-headers_type_and_time.patch
@@ -0,0 +1,48 @@
+diff -urN linux-2.6.21.1.old/include/linux/time.h linux-2.6.21.1.dev/include/linux/time.h
+--- linux-2.6.21.1.old/include/linux/time.h	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/include/linux/time.h	2007-05-26 21:23:19.859617680 +0200
+@@ -1,6 +1,10 @@
+ #ifndef _LINUX_TIME_H
+ #define _LINUX_TIME_H
+ 
++#ifndef __KERNEL__
++#include <time.h>
++#else
++
+ #include <linux/types.h>
+ 
+ #ifdef __KERNEL__
+@@ -225,4 +229,6 @@
+  */
+ #define TIMER_ABSTIME			0x01
+ 
++#endif /* __KERNEL__ DEBIAN */
++
+ #endif
+diff -urN linux-2.6.21.1.old/include/linux/types.h linux-2.6.21.1.dev/include/linux/types.h
+--- linux-2.6.21.1.old/include/linux/types.h	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/include/linux/types.h	2007-05-26 21:23:19.859617680 +0200
+@@ -1,6 +1,14 @@
+ #ifndef _LINUX_TYPES_H
+ #define _LINUX_TYPES_H
+ 
++/* Debian: Use userland types instead.  */
++#ifndef __KERNEL__
++# include <sys/types.h>
++/* For other kernel headers.  */
++# include <linux/posix_types.h>
++# include <asm/types.h>
++#else
++
+ #ifdef	__KERNEL__
+ 
+ #define BITS_TO_LONGS(bits) \
+@@ -162,6 +170,8 @@
+ 
+ #endif /* __KERNEL_STRICT_NAMES */
+ 
++#endif /* __KERNEL__ DEBIAN */
++
+ /*
+  * Below are truly Linux-specific types that should never collide with
+  * any application/library that wants linux/types.h.
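
The 900-headers_type_and_time.patch above makes <linux/time.h> and <linux/types.h> safe to include from non-kernel code by diverting to the C-library headers whenever __KERNEL__ is not defined. A hedged sketch of the intended effect (not part of the patch; it assumes a host build whose include path points at the patched headers):

#include <stdio.h>
#include <linux/types.h>	/* no longer requires __KERNEL__: falls back to <sys/types.h> & friends */

int main(void)
{
	__u32 word = 0xdeadbeefu;	/* fixed-width types still arrive via <asm/types.h> */
	__s16 delta = -42;

	printf("sizeof(__u32)=%zu sizeof(__s16)=%zu word=0x%x delta=%d\n",
	       sizeof(word), sizeof(delta), (unsigned)word, (int)delta);
	return 0;
}
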
diff --git a/target/linux/generic-2.6/patches-2.6.22/902-darwin_scripts_include.patch b/target/linux/generic-2.6/patches-2.6.22/902-darwin_scripts_include.patch
new file mode 100644
index 0000000000..8dd1e74841
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/902-darwin_scripts_include.patch
@@ -0,0 +1,108 @@
+diff -urN linux-2.6.21.1.old/scripts/genksyms/parse.c_shipped linux-2.6.21.1.dev/scripts/genksyms/parse.c_shipped
+--- linux-2.6.21.1.old/scripts/genksyms/parse.c_shipped	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/scripts/genksyms/parse.c_shipped	2007-05-26 21:26:23.565690112 +0200
+@@ -144,7 +144,9 @@
+ 
+ 
+ #include <assert.h>
++#ifndef __APPLE__
+ #include <malloc.h>
++#endif
+ #include "genksyms.h"
+ 
+ static int is_typedef;
+diff -urN linux-2.6.21.1.old/scripts/genksyms/parse.y linux-2.6.21.1.dev/scripts/genksyms/parse.y
+--- linux-2.6.21.1.old/scripts/genksyms/parse.y	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/scripts/genksyms/parse.y	2007-05-26 21:26:23.563690416 +0200
+@@ -24,7 +24,9 @@
+ %{
+ 
+ #include <assert.h>
++#ifndef __APPLE__
+ #include <malloc.h>
++#endif
+ #include "genksyms.h"
+ 
+ static int is_typedef;
+diff -urN linux-2.6.21.1.old/scripts/kallsyms.c linux-2.6.21.1.dev/scripts/kallsyms.c
+--- linux-2.6.21.1.old/scripts/kallsyms.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/scripts/kallsyms.c	2007-05-26 21:26:23.579687984 +0200
+@@ -30,6 +30,35 @@
+ #include <stdlib.h>
+ #include <string.h>
+ #include <ctype.h>
++#ifdef __APPLE__
++/* Darwin has no memmem implementation; this one is taken from the uClibc-0.9.28 source */
++void *memmem (const void *haystack, size_t haystack_len,
++                          const void *needle,  size_t needle_len)
++{
++  const char *begin;
++  const char *const last_possible
++    = (const char *) haystack + haystack_len - needle_len;
++
++  if (needle_len == 0)
++    /* The first occurrence of the empty string is deemed to occur at
++       the beginning of the string.  */
++    return (void *) haystack;
++
++  /* Sanity check, otherwise the loop might search through the whole
++     memory.  */
++  if (__builtin_expect (haystack_len < needle_len, 0))
++    return NULL;
++
++  for (begin = (const char *) haystack; begin <= last_possible; ++begin)
++    if (begin[0] == ((const char *) needle)[0] &&
++        !memcmp ((const void *) &begin[1],
++                 (const void *) ((const char *) needle + 1),
++                 needle_len - 1))
++      return (void *) begin;
++
++  return NULL;
++}
++#endif
+ 
+ #define KSYM_NAME_LEN		127
+ 
+diff -urN linux-2.6.21.1.old/scripts/kconfig/Makefile linux-2.6.21.1.dev/scripts/kconfig/Makefile
+--- linux-2.6.21.1.old/scripts/kconfig/Makefile	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/scripts/kconfig/Makefile	2007-05-26 21:26:23.553691936 +0200
+@@ -87,6 +87,9 @@
+ # we really need to do so. (Do not call gcc as part of make mrproper)
+ HOST_EXTRACFLAGS = $(shell $(CONFIG_SHELL) $(check-lxdialog) -ccflags)
+ HOST_LOADLIBES   = $(shell $(CONFIG_SHELL) $(check-lxdialog) -ldflags $(HOSTCC))
++ifeq ($(shell uname -s),Darwin)
++HOST_LOADLIBES  += -lncurses
++endif
+ 
+ HOST_EXTRACFLAGS += -DLOCALE
+ 
+diff -urN linux-2.6.21.1.old/scripts/mod/mk_elfconfig.c linux-2.6.21.1.dev/scripts/mod/mk_elfconfig.c
+--- linux-2.6.21.1.old/scripts/mod/mk_elfconfig.c	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/scripts/mod/mk_elfconfig.c	2007-05-26 21:26:23.553691936 +0200
+@@ -1,7 +1,11 @@
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
++#ifndef __APPLE__
+ #include <elf.h>
++#else
++#include "../../../../../tools/sstrip/include/elf.h"
++#endif
+ 
+ int
+ main(int argc, char **argv)
+diff -urN linux-2.6.21.1.old/scripts/mod/modpost.h linux-2.6.21.1.dev/scripts/mod/modpost.h
+--- linux-2.6.21.1.old/scripts/mod/modpost.h	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/scripts/mod/modpost.h	2007-05-26 21:26:23.553691936 +0200
+@@ -7,7 +7,11 @@
+ #include <sys/mman.h>
+ #include <fcntl.h>
+ #include <unistd.h>
++#ifndef __APPLE__
+ #include <elf.h>
++#else
++#include "../../../../../tools/sstrip/include/elf.h"
++#endif
+ 
+ #include "elfconfig.h"
+ 
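
The 902-darwin_scripts_include.patch above supplies a memmem() fallback for Darwin build hosts, where the routine is missing from libc. A quick host-side check of the same call (not part of the patch; on a glibc host the native memmem() is used, and the strings are illustrative only):

#define _GNU_SOURCE		/* glibc exposes memmem() behind this */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char haystack[] = "kallsyms_token_table";
	const char needle[] = "token";
	const char *hit = memmem(haystack, sizeof(haystack) - 1,
				 needle, sizeof(needle) - 1);

	printf("needle found at offset %ld\n", hit ? (long)(hit - haystack) : -1L);
	return 0;
}
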
diff --git a/target/linux/generic-2.6/patches-2.6.22/903-stddef_include.patch b/target/linux/generic-2.6/patches-2.6.22/903-stddef_include.patch
new file mode 100644
index 0000000000..9de81aafbb
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/903-stddef_include.patch
@@ -0,0 +1,18 @@
+diff -urN linux-2.6.21.1.old/include/linux/stddef.h linux-2.6.21.1.dev/include/linux/stddef.h
+--- linux-2.6.21.1.old/include/linux/stddef.h	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/include/linux/stddef.h	2007-05-26 21:29:46.108898864 +0200
+@@ -16,6 +16,7 @@
+ 	false	= 0,
+ 	true	= 1
+ };
++#endif /* __KERNEL__ */
+ 
+ #undef offsetof
+ #ifdef __compiler_offsetof
+@@ -23,6 +24,5 @@
+ #else
+ #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+ #endif
+-#endif /* __KERNEL__ */
+ 
+ #endif
diff --git a/target/linux/generic-2.6/patches-2.6.22/904-ls_time_locale.patch b/target/linux/generic-2.6/patches-2.6.22/904-ls_time_locale.patch
new file mode 100644
index 0000000000..175a93ba73
--- /dev/null
+++ b/target/linux/generic-2.6/patches-2.6.22/904-ls_time_locale.patch
@@ -0,0 +1,21 @@
+diff -urN linux-2.6.21.1.old/scripts/gen_initramfs_list.sh linux-2.6.21.1.dev/scripts/gen_initramfs_list.sh
+--- linux-2.6.21.1.old/scripts/gen_initramfs_list.sh	2007-04-27 23:49:26.000000000 +0200
++++ linux-2.6.21.1.dev/scripts/gen_initramfs_list.sh	2007-05-26 21:32:53.679383816 +0200
+@@ -125,7 +125,7 @@
+ 			str="${ftype} ${name} ${location} ${str}"
+ 			;;
+ 		"nod")
+-			local dev=`LC_ALL=C ls -l "${location}"`
++			local dev=`LC_ALL=C ls -l --time-style=locale "${location}"`
+ 			local maj=`field 5 ${dev}`
+ 			local min=`field 6 ${dev}`
+ 			maj=${maj%,}
+@@ -135,7 +135,7 @@
+ 			str="${ftype} ${name} ${str} ${dev} ${maj} ${min}"
+ 			;;
+ 		"slink")
+-			local target=`field 11 $(LC_ALL=C ls -l "${location}")`
++			local target=`field 11 $(LC_ALL=C ls -l --time-style=locale "${location}")`
+ 			str="${ftype} ${name} ${target} ${str}"
+ 			;;
+ 		*)
diff --git a/target/linux/iop32x-2.6/config/default b/target/linux/iop32x-2.6/config/default
index 22e7e02efc..a066b8e31e 100644
--- a/target/linux/iop32x-2.6/config/default
+++ b/target/linux/iop32x-2.6/config/default
@@ -7,6 +7,7 @@ CONFIG_ALIGNMENT_TRAP=y
 # CONFIG_ARCH_CLPS711X is not set
 # CONFIG_ARCH_CLPS7500 is not set
 # CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_DAVINCI is not set
 # CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
 # CONFIG_ARCH_EBSA110 is not set
 # CONFIG_ARCH_EP93XX is not set
@@ -22,6 +23,7 @@ CONFIG_ARCH_IOP32X=y
 # CONFIG_ARCH_IXP23XX is not set
 # CONFIG_ARCH_IQ80321 is not set
 # CONFIG_ARCH_IQ31244 is not set
+# CONFIG_ARCH_KS8695 is not set
 # CONFIG_ARCH_L7200 is not set
 # CONFIG_ARCH_LH7A40X is not set
 # CONFIG_ARCH_NETX is not set
@@ -103,6 +105,9 @@ CONFIG_DLCI_MAX=8
 CONFIG_DNOTIFY=y
 # CONFIG_DSCC4 is not set
 # CONFIG_E100 is not set
+CONFIG_E1000=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
+CONFIG_E1000_NAPI=y
 # CONFIG_FARSYNC is not set
 CONFIG_FIRMWARE_EDID=y
 # CONFIG_FPE_FASTFPE is not set
@@ -157,6 +162,7 @@ CONFIG_I2C_CHARDEV=y
 # CONFIG_IEEE80211_SOFTMAC is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_INITRAMFS_SOURCE=""
+CONFIG_IOP3XX_ATU=y
 # CONFIG_IP6_NF_MANGLE is not set
 # CONFIG_IP6_NF_MATCH_EUI64 is not set
 # CONFIG_IP6_NF_MATCH_FRAG is not set
@@ -255,6 +261,7 @@ CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
 # CONFIG_MTD_SLRAM is not set
 # CONFIG_NATSEMI is not set
 # CONFIG_NE2K_PCI is not set
+CONFIG_NETDEV_1000=y
 CONFIG_NETFILTER_NETLINK=m
 # CONFIG_NETFILTER_NETLINK_LOG is not set
 # CONFIG_NETFILTER_NETLINK_QUEUE is not set
@@ -319,6 +326,9 @@ CONFIG_PLAT_IOP=y
 # CONFIG_PM is not set
 # CONFIG_PPP is not set
 # CONFIG_PRISM54 is not set
+CONFIG_R8169=y
+CONFIG_R8169_NAPI=y
+CONFIG_R8169_VLAN=y
 # CONFIG_REISERFS_FS is not set
 # CONFIG_ROMFS_FS is not set
 # CONFIG_RPCSEC_GSS_KRB5 is not set
diff --git a/target/linux/ixp4xx-2.6/config/default b/target/linux/ixp4xx-2.6/config/default
index f0d089ad80..c93d82df91 100644
--- a/target/linux/ixp4xx-2.6/config/default
+++ b/target/linux/ixp4xx-2.6/config/default
@@ -8,6 +8,7 @@ CONFIG_ALIGNMENT_TRAP=y
 # CONFIG_ARCH_CLPS711X is not set
 # CONFIG_ARCH_CLPS7500 is not set
 # CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_DAVINCI is not set
 # CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
 # CONFIG_ARCH_EBSA110 is not set
 # CONFIG_ARCH_EP93XX is not set
@@ -24,6 +25,7 @@ CONFIG_ARCH_IXDP4XX=y
 # CONFIG_ARCH_IXP2000 is not set
 # CONFIG_ARCH_IXP23XX is not set
 CONFIG_ARCH_IXP4XX=y
+# CONFIG_ARCH_KS8695 is not set
 # CONFIG_ARCH_L7200 is not set
 # CONFIG_ARCH_LH7A40X is not set
 # CONFIG_ARCH_NETX is not set
@@ -144,6 +146,7 @@ CONFIG_I2C_CHARDEV=y
 # CONFIG_I2C_DEBUG_BUS is not set
 # CONFIG_I2C_DEBUG_CHIP is not set
 # CONFIG_I2C_DEBUG_CORE is not set
+CONFIG_I2C_GPIO=y
 # CONFIG_I2C_I801 is not set
 # CONFIG_I2C_I810 is not set
 # CONFIG_I2C_IOP3XX is not set
@@ -221,10 +224,12 @@ CONFIG_LEGACY_PTY_COUNT=256
 # CONFIG_LLC2 is not set
 CONFIG_MACH_AVILA=y
 CONFIG_MACH_COMPEX=y
+CONFIG_MACH_DSMG600=y
 CONFIG_MACH_GATEWAY7001=y
 # CONFIG_MACH_GTWX5715 is not set
 # CONFIG_MACH_IXDP465 is not set
 # CONFIG_MACH_IXDPG425 is not set
+# CONFIG_MACH_KIXRP435 is not set
 CONFIG_MACH_LOFT=y
 CONFIG_MACH_NAS100D=y
 CONFIG_MACH_NSLU2=y
@@ -291,6 +296,7 @@ CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
 # CONFIG_MTD_SLRAM is not set
 # CONFIG_NATSEMI is not set
 # CONFIG_NE2K_PCI is not set
+# CONFIG_NETDEV_1000 is not set
 CONFIG_NETFILTER_NETLINK=m
 # CONFIG_NETFILTER_NETLINK_LOG is not set
 # CONFIG_NETFILTER_NETLINK_QUEUE is not set
diff --git a/target/linux/magicbox-2.6/config/default b/target/linux/magicbox-2.6/config/default
index cc53f5e56d..83c9d4b2a8 100644
--- a/target/linux/magicbox-2.6/config/default
+++ b/target/linux/magicbox-2.6/config/default
@@ -9,6 +9,7 @@ CONFIG_4xx=y
 # CONFIG_AGP is not set
 CONFIG_ARCH_MAY_HAVE_PC_FDC=y
 CONFIG_ARCH_POPULATES_NODE_MAP=y
+# CONFIG_ATM_DRIVERS is not set
 # CONFIG_B44 is not set
 CONFIG_BASE_SMALL=0
 CONFIG_BIOS_FIXUP=y
@@ -64,6 +65,7 @@ CONFIG_IDE=m
 # CONFIG_IDE_ARM is not set
 CONFIG_IDE_GENERIC=m
 CONFIG_IDE_MAX_HWIFS=4
+# CONFIG_IDE_PROC_FS is not set
 # CONFIG_IDE_TASK_IOCTL is not set
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_ISA_DMA_API=y
@@ -72,6 +74,7 @@ CONFIG_KERNEL_START=0xc0000000
 # CONFIG_KEXEC is not set
 # CONFIG_LEDS_TRIGGER_IDE_DISK is not set
 CONFIG_LOWMEM_SIZE=0x30000000
+# CONFIG_MACINTOSH_DRIVERS is not set
 CONFIG_MAGICBOX=y
 CONFIG_MATH_EMULATION=y
 CONFIG_MINI_FO=y
@@ -130,6 +133,7 @@ CONFIG_MTD_SPLIT_ROOTFS=y
 # CONFIG_NATSEMI is not set
 # CONFIG_NE2K_PCI is not set
 # CONFIG_NET_VENDOR_3COM is not set
+CONFIG_NETDEV_1000=y
 CONFIG_NOT_COHERENT_CACHE=y
 # CONFIG_NVRAM is not set
 # CONFIG_PCIPCWATCHDOG is not set
diff --git a/target/linux/rb532-2.6/config/default b/target/linux/rb532-2.6/config/default
index 6031639b43..b537033de6 100644
--- a/target/linux/rb532-2.6/config/default
+++ b/target/linux/rb532-2.6/config/default
@@ -58,6 +58,7 @@ CONFIG_INITRAMFS_SOURCE=""
 CONFIG_IRQ_CPU=y
 CONFIG_JFFS2_FS_DEBUG=0
 CONFIG_KORINA=y
+# CONFIG_MACH_ALCHEMY is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
@@ -125,7 +126,9 @@ CONFIG_MTD_NAND=y
 # CONFIG_MTD_NAND_DISKONCHIP is not set
 # CONFIG_MTD_NAND_ECC_SMC is not set
 CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
 # CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
 CONFIG_MTD_NAND_RB500=y
 CONFIG_MTD_NAND_VERIFY_WRITE=y
 # CONFIG_MTD_OBSOLETE_CHIPS is not set
@@ -142,6 +145,7 @@ CONFIG_MTD_PARTITIONS=y
 # CONFIG_NE2K_PCI is not set
 CONFIG_NET_SCH_FIFO=y
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NETDEV_1000 is not set
 # CONFIG_PAGE_SIZE_16KB is not set
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_64KB is not set
@@ -176,6 +180,7 @@ CONFIG_SYS_HAS_CPU_MIPS32_R1=y
 CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
 CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
 CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
+# CONFIG_TC35815 is not set
 # CONFIG_TOSHIBA_JMR3927 is not set
 # CONFIG_TOSHIBA_RBTX4927 is not set
 # CONFIG_TOSHIBA_RBTX4938 is not set
diff --git a/target/linux/rdc-2.6/config/default b/target/linux/rdc-2.6/config/default
index 00652accb5..57632c012c 100644
--- a/target/linux/rdc-2.6/config/default
+++ b/target/linux/rdc-2.6/config/default
@@ -9,6 +9,7 @@
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
 CONFIG_ARCH_MAY_HAVE_PC_FDC=y
 CONFIG_ARCH_POPULATES_NODE_MAP=y
+# CONFIG_ATM_DRIVERS is not set
 CONFIG_BASE_SMALL=0
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -112,6 +113,7 @@ CONFIG_M486=y
 # CONFIG_M586TSC is not set
 # CONFIG_M686 is not set
 # CONFIG_MACHZ_WDT is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
 CONFIG_MATH_EMULATION=y
 # CONFIG_MCA is not set
 # CONFIG_MCORE2 is not set
@@ -185,6 +187,7 @@ CONFIG_MTD_RDC3210_SIZE=0x400000
 # CONFIG_MTD_TS5500 is not set
 # CONFIG_MTRR is not set
 # CONFIG_MVIAC3_2 is not set
+# CONFIG_MVIAC7 is not set
 # CONFIG_MWAVE is not set
 # CONFIG_MWINCHIP2 is not set
 # CONFIG_MWINCHIP3D is not set
@@ -198,6 +201,7 @@ CONFIG_NET_ACT_SIMP=m
 # CONFIG_NET_PKTGEN is not set
 CONFIG_NET_SCH_FIFO=y
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NETDEV_1000 is not set
 # CONFIG_NFS_V4 is not set
 CONFIG_NOHIGHMEM=y
 CONFIG_NO_HZ=y
@@ -261,6 +265,7 @@ CONFIG_USB_XUSBATM=m
 # CONFIG_VM86 is not set
 # CONFIG_VMSPLIT_1G is not set
 # CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_2G_OPT is not set
 CONFIG_VMSPLIT_3G=y
 # CONFIG_VMSPLIT_3G_OPT is not set
 CONFIG_VM_EVENT_COUNTERS=y
diff --git a/target/linux/sibyte-2.6/config/default b/target/linux/sibyte-2.6/config/default
index cf530a51f1..8d9895e02e 100644
--- a/target/linux/sibyte-2.6/config/default
+++ b/target/linux/sibyte-2.6/config/default
@@ -142,6 +142,7 @@ CONFIG_JFFS2_FS_DEBUG=0
 # CONFIG_LLC2 is not set
 CONFIG_LOCK_KERNEL=y
 CONFIG_LOG_BUF_SHIFT=15
+# CONFIG_MACH_ALCHEMY is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_MACH_VR41XX is not set
@@ -247,6 +248,7 @@ CONFIG_MTD_PARTITIONS=y
 CONFIG_NET_SB1250_MAC=y
 # CONFIG_NET_SCH_NETEM is not set
 # CONFIG_NET_VENDOR_3COM is not set
+CONFIG_NETDEV_1000=y
 CONFIG_NR_CPUS=2
 # CONFIG_NTFS_FS is not set
 # CONFIG_N_HDLC is not set
diff --git a/target/linux/uml-2.6/config/i386 b/target/linux/uml-2.6/config/i386
index 35a7e74b50..f2286da4e1 100644
--- a/target/linux/uml-2.6/config/i386
+++ b/target/linux/uml-2.6/config/i386
@@ -80,6 +80,7 @@ CONFIG_MPENTIUMII=y
 # CONFIG_MPENTIUMIII is not set
 # CONFIG_MPENTIUMM is not set
 # CONFIG_MVIAC3_2 is not set
+# CONFIG_MVIAC7 is not set
 # CONFIG_MWINCHIP2 is not set
 # CONFIG_MWINCHIP3D is not set
 # CONFIG_MWINCHIPC6 is not set
diff --git a/target/linux/x86-2.6/Makefile b/target/linux/x86-2.6/Makefile
index ff52093ef3..06decc151f 100644
--- a/target/linux/x86-2.6/Makefile
+++ b/target/linux/x86-2.6/Makefile
@@ -11,6 +11,9 @@ BOARD:=x86
 BOARDNAME:=x86
 FEATURES:=squashfs jffs2 ext2
 
+#LINUX_VERSION:=2.6.22-rc4
+#LINUX_KERNEL_MD5SUM:=125879208b47a4c27e4f31a33ab3011c
+
 include $(INCLUDE_DIR)/kernel-build.mk
 DEFAULT_PACKAGES += kmod-natsemi kmod-ne2k-pci
 
diff --git a/target/linux/x86-2.6/config/default b/target/linux/x86-2.6/config/default
index fbddfea513..5e4030773b 100644
--- a/target/linux/x86-2.6/config/default
+++ b/target/linux/x86-2.6/config/default
@@ -25,6 +25,7 @@ CONFIG_ATA=m
 # CONFIG_ATA_GENERIC is not set
 # CONFIG_ATA_NONSTANDARD is not set
 CONFIG_ATA_PIIX=m
+# CONFIG_ATM_DRIVERS is not set
 CONFIG_BASE_SMALL=0
 # CONFIG_BINFMT_AOUT is not set
 CONFIG_BITREVERSE=y
@@ -134,6 +135,7 @@ CONFIG_IDEPCI_SHARE_IRQ=y
 # CONFIG_IDE_CHIPSETS is not set
 CONFIG_IDE_GENERIC=y
 CONFIG_IDE_MAX_HWIFS=4
+# CONFIG_IDE_PROC_FS is not set
 # CONFIG_IDE_TASK_IOCTL is not set
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_INPUT=y
@@ -169,6 +171,7 @@ CONFIG_M486=y
 # CONFIG_M586TSC is not set
 # CONFIG_M686 is not set
 # CONFIG_MACHZ_WDT is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
 # CONFIG_MATH_EMULATION is not set
 # CONFIG_MCA is not set
 # CONFIG_MCORE2 is not set
@@ -183,10 +186,17 @@ CONFIG_M486=y
 # CONFIG_MK6 is not set
 # CONFIG_MK7 is not set
 # CONFIG_MK8 is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
 # CONFIG_MOUSE_INPORT is not set
 # CONFIG_MOUSE_LOGIBM is not set
 # CONFIG_MOUSE_PC110PAD is not set
 CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_PS2_ALPS is not set
+# CONFIG_MOUSE_PS2_LIFEBOOK is not set
+# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
+# CONFIG_MOUSE_PS2_TRACKPOINT is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
 # CONFIG_MOUSE_SERIAL is not set
 # CONFIG_MOUSE_VSXXXAA is not set
 # CONFIG_MPENTIUM4 is not set
@@ -232,6 +242,7 @@ CONFIG_MTD_PARTITIONS=y
 # CONFIG_MTD_TS5500 is not set
 # CONFIG_MTRR is not set
 # CONFIG_MVIAC3_2 is not set
+# CONFIG_MVIAC7 is not set
 # CONFIG_MWAVE is not set
 # CONFIG_MWINCHIP2 is not set
 # CONFIG_MWINCHIP3D is not set
@@ -244,6 +255,7 @@ CONFIG_NET_SCH_FIFO=y
 CONFIG_NET_VENDOR_3COM=y
 # CONFIG_NET_VENDOR_RACAL is not set
 # CONFIG_NET_VENDOR_SMC is not set
+CONFIG_NETDEV_1000=y
 CONFIG_NOHIGHMEM=y
 # CONFIG_NO_HZ is not set
 CONFIG_NSC_GPIO=m
@@ -295,6 +307,8 @@ CONFIG_SCx200_I2C_SDA=13
 CONFIG_SCx200_WDT=m
 # CONFIG_SECCOMP is not set
 CONFIG_SEMAPHORE_SLEEPERS=y
+# CONFIG_SENSORS_APPLESMC is not set
+# CONFIG_SENSORS_CORETEMP is not set
 CONFIG_SENSORS_PC87360=m
 # CONFIG_SERIAL_8250_CS is not set
 # CONFIG_SERIAL_8250_EXTENDED is not set
@@ -341,6 +355,7 @@ CONFIG_VIA_RHINE_NAPI=y
 # CONFIG_VM86 is not set
 # CONFIG_VMSPLIT_1G is not set
 # CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_2G_OPT is not set
 CONFIG_VMSPLIT_3G=y
 # CONFIG_VMSPLIT_3G_OPT is not set
 CONFIG_VORTEX=m