From: Felix Fietkau <nbd@openwrt.org>
Date: Thu, 6 Apr 2006 15:52:01 +0000 (+0000)
Subject: integrate the newer Broadcom wl driver from US Robotics
X-Git-Url: http://git.cdn.openwrt.org/?a=commitdiff_plain;h=ab9e191898c7ad88ccf5146fa409d14c346486c8;p=openwrt%2Fstaging%2Fthess.git

integrate the newer Broadcom wl driver from US Robotics

kernel-binary-wl is bumped from 0.5 to 0.6; the tarball now carries the old and the new driver objects side by side. The old objects still go to drivers/net/wl (package brcm-wl), the new ones are copied to drivers/net/wl2 and packaged as brcm-wl2, with CONFIG_WL2=m added to the brcm-2.4 config. 001-bcm47xx.patch is regenerated, updating the bundled Broadcom headers.
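A rough usage sketch for a running device (the on-device module paths are assumed
from the install locations in the Makefile below, not taken from this commit; load
only one of the two drivers):

    # assumed path for the newer driver added by this commit
    insmod /lib/modules/2.4.32/kernel/drivers/net/wl2/wl.o
    # assumed path for the previous driver
    insmod /lib/modules/2.4.32/kernel/drivers/net/wl/wl.o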

SVN-Revision: 3596
---

diff --git a/openwrt/target/linux/brcm-2.4/Makefile b/openwrt/target/linux/brcm-2.4/Makefile
index ab68b7ae88..17473be323 100644
--- a/openwrt/target/linux/brcm-2.4/Makefile
+++ b/openwrt/target/linux/brcm-2.4/Makefile
@@ -13,7 +13,7 @@ $(eval $(call KMOD_template,BRCM_WL,brcm-wl,\
 ,CONFIG_WL,,20,wl))
 
 $(eval $(call KMOD_template,BRCM_WL2,brcm-wl2,\
-	$(BUILD_DIR)/wl/wl2/wl.o \
+	$(MODULES_DIR)/kernel/drivers/net/wl2/wl.o \
 ,CONFIG_WL,,20,wl))
 
 $(eval $(call KMOD_template,LP,lp,\
@@ -40,8 +40,8 @@ $(LINUX_DIR)/.patched: $(LINUX_DIR)/.unpacked
 
 LINUX_BINARY_DRIVER_SITE=http://openwrt.org/downloads/sources
 # proprietary driver, extracted from Linksys GPL sourcetree WRT54GS 4.70.6
-LINUX_BINARY_WL_DRIVER=kernel-binary-wl-0.5.tar.gz
-LINUX_BINARY_WL_MD5SUM=78e839842bdc04022bb44469f92b1131
+LINUX_BINARY_WL_DRIVER=kernel-binary-wl-0.6.tar.gz
+LINUX_BINARY_WL_MD5SUM=4fc1d5b46bcb7a17d6d5dd31da9c8d7f
 
 $(DL_DIR)/$(LINUX_BINARY_WL_DRIVER):
 	$(SCRIPT_DIR)/download.pl $(DL_DIR) $(LINUX_BINARY_WL_DRIVER) $(LINUX_BINARY_WL_MD5SUM) $(LINUX_BINARY_DRIVER_SITE) $(MAKE_TRACE)
@@ -56,12 +56,15 @@ $(LINUX_DIR)/.drivers-unpacked: $(LINUX_DIR)/.unpacked
 	zcat $(DL_DIR)/$(LINUX_BINARY_WL_DRIVER) | tar -C $(BUILD_DIR) $(TAR_OPTIONS) -
 	# copy binary wlan driver
 	mkdir -p $(LINUX_DIR)/drivers/net/wl
-	$(CP) $(BUILD_DIR)/wl/*.o $(LINUX_DIR)/drivers/net/wl
+	$(CP) $(BUILD_DIR)/kernel-binary-wl/old/*.o $(LINUX_DIR)/drivers/net/wl
+	$(CP) $(BUILD_DIR)/kernel-binary-wl/new/*.o $(LINUX_DIR)/drivers/net/wl2
 	touch $@
 
 $(LINUX_DIR)/.drivers-installed: $(LINUX_DIR)/.modules_done
 	mkdir -p $(LINUX_BUILD_DIR)/modules/lib/modules/2.4.32/kernel/drivers/net/wl
+	mkdir -p $(LINUX_BUILD_DIR)/modules/lib/modules/2.4.32/kernel/drivers/net/wl2
 	@-[ -f $(LINUX_BUILD_DIR)/modules/lib/modules/2.4.32/kernel/drivers/net/wl/wl.o ] || $(CP) $(LINUX_DIR)/drivers/net/wl/wl.o $(LINUX_BUILD_DIR)/modules/lib/modules/2.4.32/kernel/drivers/net/wl/
+	@-[ -f $(LINUX_BUILD_DIR)/modules/lib/modules/2.4.32/kernel/drivers/net/wl2/wl.o ] || $(CP) $(LINUX_DIR)/drivers/net/wl2/wl.o $(LINUX_BUILD_DIR)/modules/lib/modules/2.4.32/kernel/drivers/net/wl2/
 	touch $@
 
 linux-dirclean: drivers-clean
diff --git a/openwrt/target/linux/brcm-2.4/config b/openwrt/target/linux/brcm-2.4/config
index c4cf5340ea..9759b90f5f 100644
--- a/openwrt/target/linux/brcm-2.4/config
+++ b/openwrt/target/linux/brcm-2.4/config
@@ -833,6 +833,7 @@ CONFIG_NET_RADIO=y
 # CONFIG_AIRO is not set
 # CONFIG_HERMES is not set
 CONFIG_WL=m
+CONFIG_WL2=m
 # CONFIG_PLX_HERMES is not set
 # CONFIG_TMD_HERMES is not set
 # CONFIG_PCI_HERMES is not set
diff --git a/openwrt/target/linux/brcm-2.4/patches/001-bcm47xx.patch b/openwrt/target/linux/brcm-2.4/patches/001-bcm47xx.patch
index 2e64b0fc3e..d662b4c015 100644
--- a/openwrt/target/linux/brcm-2.4/patches/001-bcm47xx.patch
+++ b/openwrt/target/linux/brcm-2.4/patches/001-bcm47xx.patch
@@ -1,6 +1,56 @@
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/cfe_env.c linux-2.4.32-brcm/arch/mips/bcm947xx/cfe_env.c
---- linux-2.4.32/arch/mips/bcm947xx/cfe_env.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/cfe_env.c	2005-12-19 01:56:35.104829500 +0100
+diff -Naur linux.old/arch/mips/Makefile linux.dev/arch/mips/Makefile
+--- linux.old/arch/mips/Makefile	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/arch/mips/Makefile	2006-04-06 15:34:15.000000000 +0200
+@@ -726,6 +726,19 @@
+ endif
+ 
+ #
++# Broadcom BCM947XX variants
++#
++ifdef CONFIG_BCM947XX
++LIBS		+= arch/mips/bcm947xx/generic/brcm.o arch/mips/bcm947xx/bcm947xx.o 
++SUBDIRS		+= arch/mips/bcm947xx/generic arch/mips/bcm947xx 
++LOADADDR	:= 0x80001000
++
++zImage: vmlinux
++	$(MAKE) -C arch/$(ARCH)/bcm947xx/compressed
++export LOADADDR
++endif
++
++#
+ # Choosing incompatible machines durings configuration will result in
+ # error messages during linking.  Select a default linkscript if
+ # none has been choosen above.
+@@ -778,6 +791,7 @@
+ 	$(MAKE) -C arch/$(ARCH)/tools clean
+ 	$(MAKE) -C arch/mips/baget clean
+ 	$(MAKE) -C arch/mips/lasat clean
++	$(MAKE) -C arch/mips/bcm947xx/compressed clean
+ 
+ archmrproper:
+ 	@$(MAKEBOOT) mrproper
+diff -Naur linux.old/arch/mips/bcm947xx/Makefile linux.dev/arch/mips/bcm947xx/Makefile
+--- linux.old/arch/mips/bcm947xx/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/Makefile	2006-04-06 15:34:14.000000000 +0200
+@@ -0,0 +1,15 @@
++#
++# Makefile for the BCM947xx specific kernel interface routines
++# under Linux.
++#
++
++EXTRA_CFLAGS+=-I$(TOPDIR)/arch/mips/bcm947xx/include -DBCMDRIVER
++
++O_TARGET        := bcm947xx.o
++
++export-objs     := nvram_linux.o setup.o
++obj-y		:= prom.o setup.o time.o sbmips.o gpio.o
++obj-y		+= nvram.o nvram_linux.o sflash.o cfe_env.o
++obj-$(CONFIG_PCI) += sbpci.o pcibios.o
++
++include $(TOPDIR)/Rules.make
+diff -Naur linux.old/arch/mips/bcm947xx/cfe_env.c linux.dev/arch/mips/bcm947xx/cfe_env.c
+--- linux.old/arch/mips/bcm947xx/cfe_env.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/cfe_env.c	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,234 @@
 +/*
 + * NVRAM variable manipulation (Linux kernel half)
@@ -236,9 +286,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/cfe_env.c linux-2.4.32-brcm/arch/mips/
 +
 +}
 +
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/compressed/Makefile linux-2.4.32-brcm/arch/mips/bcm947xx/compressed/Makefile
---- linux-2.4.32/arch/mips/bcm947xx/compressed/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/compressed/Makefile	2005-12-16 23:39:10.668819500 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/compressed/Makefile linux.dev/arch/mips/bcm947xx/compressed/Makefile
+--- linux.old/arch/mips/bcm947xx/compressed/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/compressed/Makefile	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,33 @@
 +#
 +# Makefile for Broadcom BCM947XX boards
@@ -273,9 +323,28 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/compressed/Makefile linux-2.4.32-brcm/
 +
 +clean:
 +	rm -f vmlinuz piggy
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/generic/int-handler.S linux-2.4.32-brcm/arch/mips/bcm947xx/generic/int-handler.S
---- linux-2.4.32/arch/mips/bcm947xx/generic/int-handler.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/generic/int-handler.S	2005-12-16 23:39:10.668819500 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/generic/Makefile linux.dev/arch/mips/bcm947xx/generic/Makefile
+--- linux.old/arch/mips/bcm947xx/generic/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/generic/Makefile	2006-04-06 15:34:14.000000000 +0200
+@@ -0,0 +1,15 @@
++#
++# Makefile for the BCM947xx specific kernel interface routines
++# under Linux.
++#
++
++.S.s:
++	$(CPP) $(AFLAGS) $< -o $*.s
++.S.o:
++	$(CC) $(AFLAGS) -c $< -o $*.o
++
++O_TARGET        := brcm.o
++
++obj-y	:= int-handler.o irq.o
++
++include $(TOPDIR)/Rules.make
+diff -Naur linux.old/arch/mips/bcm947xx/generic/int-handler.S linux.dev/arch/mips/bcm947xx/generic/int-handler.S
+--- linux.old/arch/mips/bcm947xx/generic/int-handler.S	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/generic/int-handler.S	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,51 @@
 +/*
 + * Generic interrupt handler for Broadcom MIPS boards
@@ -328,9 +397,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/generic/int-handler.S linux-2.4.32-brc
 +	 nop
 +		
 +	END(brcmIRQ)
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/generic/irq.c linux-2.4.32-brcm/arch/mips/bcm947xx/generic/irq.c
---- linux-2.4.32/arch/mips/bcm947xx/generic/irq.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/generic/irq.c	2005-12-16 23:39:10.668819500 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/generic/irq.c linux.dev/arch/mips/bcm947xx/generic/irq.c
+--- linux.old/arch/mips/bcm947xx/generic/irq.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/generic/irq.c	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,130 @@
 +/*
 + * Generic interrupt control functions for Broadcom MIPS boards
@@ -462,28 +531,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/generic/irq.c linux-2.4.32-brcm/arch/m
 +	breakpoint(); 
 +#endif
 +}
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/generic/Makefile linux-2.4.32-brcm/arch/mips/bcm947xx/generic/Makefile
---- linux-2.4.32/arch/mips/bcm947xx/generic/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/generic/Makefile	2005-12-16 23:39:10.668819500 +0100
-@@ -0,0 +1,15 @@
-+#
-+# Makefile for the BCM947xx specific kernel interface routines
-+# under Linux.
-+#
-+
-+.S.s:
-+	$(CPP) $(AFLAGS) $< -o $*.s
-+.S.o:
-+	$(CC) $(AFLAGS) -c $< -o $*.o
-+
-+O_TARGET        := brcm.o
-+
-+obj-y	:= int-handler.o irq.o
-+
-+include $(TOPDIR)/Rules.make
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/gpio.c linux-2.4.32-brcm/arch/mips/bcm947xx/gpio.c
---- linux-2.4.32/arch/mips/bcm947xx/gpio.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/gpio.c	2005-12-16 23:39:10.668819500 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/gpio.c linux.dev/arch/mips/bcm947xx/gpio.c
+--- linux.old/arch/mips/bcm947xx/gpio.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/gpio.c	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,158 @@
 +/*
 + * GPIO char driver
@@ -643,9 +693,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/gpio.c linux-2.4.32-brcm/arch/mips/bcm
 +
 +module_init(gpio_init);
 +module_exit(gpio_exit);
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/bcmdevs.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/bcmdevs.h
---- linux-2.4.32/arch/mips/bcm947xx/include/bcmdevs.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/bcmdevs.h	2005-12-16 23:39:10.672819750 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/bcmdevs.h linux.dev/arch/mips/bcm947xx/include/bcmdevs.h
+--- linux.old/arch/mips/bcm947xx/include/bcmdevs.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/bcmdevs.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,391 @@
 +/*
 + * Broadcom device-specific manifest constants.
@@ -1038,9 +1088,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/bcmdevs.h linux-2.4.32-brcm/ar
 +#define GPIO_NUMPINS		16
 +
 +#endif /* _BCMDEVS_H */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/bcmendian.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/bcmendian.h
---- linux-2.4.32/arch/mips/bcm947xx/include/bcmendian.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/bcmendian.h	2005-12-16 23:39:10.672819750 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/bcmendian.h linux.dev/arch/mips/bcm947xx/include/bcmendian.h
+--- linux.old/arch/mips/bcm947xx/include/bcmendian.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/bcmendian.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,152 @@
 +/*
 + * local version of endian.h - byte order defines
@@ -1194,9 +1244,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/bcmendian.h linux-2.4.32-brcm/
 +)
 +
 +#endif /* _BCMENDIAN_H_ */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/bcmnvram.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/bcmnvram.h
---- linux-2.4.32/arch/mips/bcm947xx/include/bcmnvram.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/bcmnvram.h	2005-12-16 23:39:10.700821500 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/bcmnvram.h linux.dev/arch/mips/bcm947xx/include/bcmnvram.h
+--- linux.old/arch/mips/bcm947xx/include/bcmnvram.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/bcmnvram.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,141 @@
 +/*
 + * NVRAM variable manipulation
@@ -1339,9 +1389,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/bcmnvram.h linux-2.4.32-brcm/a
 +#define NVRAM_MAX_PARAM_LEN 64
 +
 +#endif /* _bcmnvram_h_ */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/bcmsrom.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/bcmsrom.h
---- linux-2.4.32/arch/mips/bcm947xx/include/bcmsrom.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/bcmsrom.h	2005-12-16 23:39:10.704821750 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/bcmsrom.h linux.dev/arch/mips/bcm947xx/include/bcmsrom.h
+--- linux.old/arch/mips/bcm947xx/include/bcmsrom.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/bcmsrom.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,23 @@
 +/*
 + * Misc useful routines to access NIC local SROM/OTP .
@@ -1366,10 +1416,10 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/bcmsrom.h linux-2.4.32-brcm/ar
 +extern int srom_write(uint bus, void *curmap, osl_t *osh, uint byteoff, uint nbytes, uint16 *buf);
 +
 +#endif	/* _bcmsrom_h_ */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/bcmutils.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/bcmutils.h
---- linux-2.4.32/arch/mips/bcm947xx/include/bcmutils.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/bcmutils.h	2005-12-16 23:39:10.704821750 +0100
-@@ -0,0 +1,313 @@
+diff -Naur linux.old/arch/mips/bcm947xx/include/bcmutils.h linux.dev/arch/mips/bcm947xx/include/bcmutils.h
+--- linux.old/arch/mips/bcm947xx/include/bcmutils.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/bcmutils.h	2006-04-06 17:07:30.000000000 +0200
+@@ -0,0 +1,287 @@
 +/*
 + * Misc useful os-independent macros and functions.
 + *
@@ -1429,32 +1479,6 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/bcmutils.h linux-2.4.32-brcm/a
 +	} \
 +}
 +
-+/* generic osl packet queue */
-+struct pktq {
-+	void *head;	/* first packet to dequeue */
-+	void *tail;	/* last packet to dequeue */
-+	uint len;	/* number of queued packets */
-+	uint maxlen;	/* maximum number of queued packets */
-+	bool priority;	/* enqueue by packet priority */
-+	uint8 prio_map[MAXPRIO+1]; /* user priority to packet enqueue policy map */
-+};
-+#define DEFAULT_QLEN	128
-+
-+#define	pktq_len(q)	((q)->len)
-+#define	pktq_avail(q)	((q)->maxlen - (q)->len)
-+#define	pktq_head(q)	((q)->head)
-+#define	pktq_full(q)	((q)->len >= (q)->maxlen)
-+#define	_pktq_pri(q, pri)	((q)->prio_map[pri])
-+#define	pktq_tailpri(q)	((q)->tail ? _pktq_pri(q, PKTPRIO((q)->tail)) : _pktq_pri(q, 0))
-+
-+/* externs */
-+/* packet */
-+extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
-+extern uint pkttotlen(osl_t *osh, void *);
-+extern void pktq_init(struct pktq *q, uint maxlen, const uint8 prio_map[]);
-+extern void pktenq(struct pktq *q, void *p, bool lifo);
-+extern void *pktdeq(struct pktq *q);
-+extern void *pktdeqtail(struct pktq *q);
 +/* string */
 +extern uint bcm_atoi(char *s);
 +extern uchar bcm_toupper(uchar c);
@@ -1683,84 +1707,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/bcmutils.h linux-2.4.32-brcm/a
 +extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len);
 +
 +#endif	/* _bcmutils_h_ */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/hnddma.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/hnddma.h
---- linux-2.4.32/arch/mips/bcm947xx/include/hnddma.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/hnddma.h	2005-12-16 23:39:10.708822000 +0100
-@@ -0,0 +1,71 @@
-+/*
-+ * Generic Broadcom Home Networking Division (HND) DMA engine SW interface
-+ * This supports the following chips: BCM42xx, 44xx, 47xx .
-+ *
-+ * Copyright 2005, Broadcom Corporation      
-+ * All Rights Reserved.      
-+ *       
-+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY      
-+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM      
-+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS      
-+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
-+ * $Id$
-+ */
-+
-+#ifndef	_hnddma_h_
-+#define	_hnddma_h_
-+
-+/* export structure */
-+typedef volatile struct {
-+	/* rx error counters */
-+	uint		rxgiants;	/* rx giant frames */
-+	uint		rxnobuf;	/* rx out of dma descriptors */
-+	/* tx error counters */
-+	uint		txnobuf;	/* tx out of dma descriptors */
-+} hnddma_t;
-+
-+#ifndef di_t
-+#define	di_t	void
-+#endif
-+
-+#ifndef osl_t 
-+#define osl_t void
-+#endif
-+
-+/* externs */
-+extern void * dma_attach(osl_t *osh, char *name, sb_t *sbh, void *dmaregstx, void *dmaregsrx, 
-+			 uint ntxd, uint nrxd, uint rxbufsize, uint nrxpost, uint rxoffset, uint *msg_level);
-+extern void dma_detach(di_t *di);
-+extern void dma_txreset(di_t *di);
-+extern void dma_rxreset(di_t *di);
-+extern void dma_txinit(di_t *di);
-+extern bool dma_txenabled(di_t *di);
-+extern void dma_rxinit(di_t *di);
-+extern void dma_rxenable(di_t *di);
-+extern bool dma_rxenabled(di_t *di);
-+extern void dma_txsuspend(di_t *di);
-+extern void dma_txresume(di_t *di);
-+extern bool dma_txsuspended(di_t *di);
-+extern bool dma_txsuspendedidle(di_t *di);
-+extern bool dma_txstopped(di_t *di);
-+extern bool dma_rxstopped(di_t *di);
-+extern int dma_txfast(di_t *di, void *p, uint32 coreflags);
-+extern void dma_fifoloopbackenable(di_t *di);
-+extern void *dma_rx(di_t *di);
-+extern void dma_rxfill(di_t *di);
-+extern void dma_txreclaim(di_t *di, bool forceall);
-+extern void dma_rxreclaim(di_t *di);
-+extern uintptr dma_getvar(di_t *di, char *name);
-+extern void *dma_getnexttxp(di_t *di, bool forceall);
-+extern void *dma_peeknexttxp(di_t *di);
-+extern void *dma_getnextrxp(di_t *di, bool forceall);
-+extern void dma_txblock(di_t *di);
-+extern void dma_txunblock(di_t *di);
-+extern uint dma_txactive(di_t *di);
-+extern void dma_txrotate(di_t *di);
-+
-+extern void dma_rxpiomode(dma32regs_t *);
-+extern void dma_txpioloopback(dma32regs_t *);
-+
-+
-+#endif	/* _hnddma_h_ */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/hndmips.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/hndmips.h
---- linux-2.4.32/arch/mips/bcm947xx/include/hndmips.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/hndmips.h	2005-12-16 23:39:10.708822000 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/hndmips.h linux.dev/arch/mips/bcm947xx/include/hndmips.h
+--- linux.old/arch/mips/bcm947xx/include/hndmips.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/hndmips.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,16 @@
 +/*
 + * Alternate include file for HND sbmips.h since CFE also ships with
@@ -1778,9 +1727,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/hndmips.h linux-2.4.32-brcm/ar
 + */
 +
 +#include "sbmips.h"
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/linux_osl.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/linux_osl.h
---- linux-2.4.32/arch/mips/bcm947xx/include/linux_osl.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/linux_osl.h	2005-12-16 23:39:10.708822000 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/linux_osl.h linux.dev/arch/mips/bcm947xx/include/linux_osl.h
+--- linux.old/arch/mips/bcm947xx/include/linux_osl.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/linux_osl.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,371 @@
 +/*
 + * Linux OS Independent Layer
@@ -2153,9 +2102,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/linux_osl.h linux-2.4.32-brcm/
 +#define	PKTBUFSZ	2048
 +
 +#endif	/* _linux_osl_h_ */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/linuxver.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/linuxver.h
---- linux-2.4.32/arch/mips/bcm947xx/include/linuxver.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/linuxver.h	2005-12-16 23:39:10.748824500 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/linuxver.h linux.dev/arch/mips/bcm947xx/include/linuxver.h
+--- linux.old/arch/mips/bcm947xx/include/linuxver.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/linuxver.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,411 @@
 +/*
 + * Linux-specific abstractions to gain some independence from linux kernel versions.
@@ -2568,9 +2517,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/linuxver.h linux-2.4.32-brcm/a
 +#endif
 +
 +#endif /* _linuxver_h_ */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/mipsinc.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/mipsinc.h
---- linux-2.4.32/arch/mips/bcm947xx/include/mipsinc.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/mipsinc.h	2005-12-16 23:39:10.748824500 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/mipsinc.h linux.dev/arch/mips/bcm947xx/include/mipsinc.h
+--- linux.old/arch/mips/bcm947xx/include/mipsinc.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/mipsinc.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,552 @@
 +/*
 + * HND Run Time Environment for standalone MIPS programs.
@@ -3124,9 +3073,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/mipsinc.h linux-2.4.32-brcm/ar
 +#endif /* !_LANGUAGE_ASSEMBLY */
 +
 +#endif	/* _MISPINC_H */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/osl.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/osl.h
---- linux-2.4.32/arch/mips/bcm947xx/include/osl.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/osl.h	2005-12-16 23:39:10.748824500 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/osl.h linux.dev/arch/mips/bcm947xx/include/osl.h
+--- linux.old/arch/mips/bcm947xx/include/osl.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/osl.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,42 @@
 +/*
 + * OS Abstraction Layer
@@ -3170,9 +3119,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/osl.h linux-2.4.32-brcm/arch/m
 +#define	MAXPRIO		7	/* 0-7 */
 +
 +#endif	/* _osl_h_ */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/pcicfg.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/pcicfg.h
---- linux-2.4.32/arch/mips/bcm947xx/include/pcicfg.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/pcicfg.h	2005-12-16 23:39:10.752824750 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/pcicfg.h linux.dev/arch/mips/bcm947xx/include/pcicfg.h
+--- linux.old/arch/mips/bcm947xx/include/pcicfg.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/pcicfg.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,451 @@
 +/*
 + * pcicfg.h: PCI configuration  constants and structures.
@@ -3625,9 +3574,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/pcicfg.h linux-2.4.32-brcm/arc
 +#define PCI_CFG_CMD_STAT_TA	0x08000000	/* target abort status */
 +
 +#endif
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbchipc.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbchipc.h
---- linux-2.4.32/arch/mips/bcm947xx/include/sbchipc.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbchipc.h	2005-12-16 23:39:10.932836000 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/sbchipc.h linux.dev/arch/mips/bcm947xx/include/sbchipc.h
+--- linux.old/arch/mips/bcm947xx/include/sbchipc.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/sbchipc.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,440 @@
 +/*
 + * SiliconBackplane Chipcommon core hardware definitions.
@@ -4069,9 +4018,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbchipc.h linux-2.4.32-brcm/ar
 +#define	OTP_MAGIC	0x4e56
 +
 +#endif	/* _SBCHIPC_H */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbconfig.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbconfig.h
---- linux-2.4.32/arch/mips/bcm947xx/include/sbconfig.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbconfig.h	2005-12-16 23:39:10.932836000 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/sbconfig.h linux.dev/arch/mips/bcm947xx/include/sbconfig.h
+--- linux.old/arch/mips/bcm947xx/include/sbconfig.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/sbconfig.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,342 @@
 +/*
 + * Broadcom SiliconBackplane hardware register definitions.
@@ -4415,9 +4364,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbconfig.h linux-2.4.32-brcm/a
 +#define BISZ_SIZE		7		/* descriptor size in 32-bit intergers */
 +
 +#endif	/* _SBCONFIG_H */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbextif.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbextif.h
---- linux-2.4.32/arch/mips/bcm947xx/include/sbextif.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbextif.h	2005-12-16 23:39:10.932836000 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/sbextif.h linux.dev/arch/mips/bcm947xx/include/sbextif.h
+--- linux.old/arch/mips/bcm947xx/include/sbextif.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/sbextif.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,242 @@
 +/*
 + * Hardware-specific External Interface I/O core definitions
@@ -4661,13 +4610,12 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbextif.h linux-2.4.32-brcm/ar
 +#define	CC_CLOCK_BASE	24000000	/* Half the clock freq. in the 4710 */
 +
 +#endif	/* _SBEXTIF_H */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbhnddma.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbhnddma.h
---- linux-2.4.32/arch/mips/bcm947xx/include/sbhnddma.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbhnddma.h	2005-12-16 23:39:10.932836000 +0100
-@@ -0,0 +1,312 @@
+diff -Naur linux.old/arch/mips/bcm947xx/include/sbmemc.h linux.dev/arch/mips/bcm947xx/include/sbmemc.h
+--- linux.old/arch/mips/bcm947xx/include/sbmemc.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/sbmemc.h	2006-04-06 15:34:14.000000000 +0200
+@@ -0,0 +1,148 @@
 +/*
-+ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
-+ * This supports the following chips: BCM42xx, 44xx, 47xx .
++ * BCM47XX Sonics SiliconBackplane DDR/SDRAM controller core hardware definitions.
 + *
 + * Copyright 2005, Broadcom Corporation      
 + * All Rights Reserved.      
@@ -4676,314 +4624,344 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbhnddma.h linux-2.4.32-brcm/a
 + * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM      
 + * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS      
 + * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
++ *
 + * $Id$
 + */
 +
-+#ifndef	_sbhnddma_h_
-+#define	_sbhnddma_h_
++#ifndef	_SBMEMC_H
++#define	_SBMEMC_H
 +
-+ 
-+/* 2byte-wide pio register set per channel(xmt or rcv) */
-+typedef volatile struct {
-+	uint16	fifocontrol;
-+	uint16	fifodata;
-+	uint16	fifofree;	/* only valid in xmt channel, not in rcv channel */
-+	uint16	PAD;
-+} pio2regs_t;
++#ifdef _LANGUAGE_ASSEMBLY
 +
-+/* a pair of pio channels(tx and rx) */
-+typedef volatile struct {
-+	pio2regs_t	tx;
-+	pio2regs_t	rx;
-+} pio2regp_t;
++#define	MEMC_CONTROL		0x00
++#define	MEMC_CONFIG		0x04
++#define	MEMC_REFRESH		0x08
++#define	MEMC_BISTSTAT		0x0c
++#define	MEMC_MODEBUF		0x10
++#define	MEMC_BKCLS		0x14
++#define	MEMC_PRIORINV		0x18
++#define	MEMC_DRAMTIM		0x1c
++#define	MEMC_INTSTAT		0x20
++#define	MEMC_INTMASK		0x24
++#define	MEMC_INTINFO		0x28
++#define	MEMC_NCDLCTL		0x30
++#define	MEMC_RDNCDLCOR		0x34
++#define	MEMC_WRNCDLCOR		0x38
++#define	MEMC_MISCDLYCTL		0x3c
++#define	MEMC_DQSGATENCDL	0x40
++#define	MEMC_SPARE		0x44
++#define	MEMC_TPADDR		0x48
++#define	MEMC_TPDATA		0x4c
++#define	MEMC_BARRIER		0x50
++#define	MEMC_CORE		0x54
 +
-+/* 4byte-wide pio register set per channel(xmt or rcv) */
-+typedef volatile struct {
-+	uint32	fifocontrol;
-+	uint32	fifodata;
-+} pio4regs_t;
 +
-+/* a pair of pio channels(tx and rx) */
-+typedef volatile struct {
-+	pio4regs_t	tx;
-+	pio4regs_t	rx;
-+} pio4regp_t;
++#else
 +
++/* Sonics side: MEMC core registers */
++typedef volatile struct sbmemcregs {
++	uint32	control;
++	uint32	config;
++	uint32	refresh;
++	uint32	biststat;
++	uint32	modebuf;
++	uint32	bkcls;
++	uint32	priorinv;
++	uint32	dramtim;
++	uint32	intstat;
++	uint32	intmask;
++	uint32	intinfo;
++	uint32	reserved1;
++	uint32	ncdlctl;
++	uint32	rdncdlcor;
++	uint32	wrncdlcor;
++	uint32	miscdlyctl;
++	uint32	dqsgatencdl;
++	uint32	spare;
++	uint32	tpaddr;
++	uint32	tpdata;
++	uint32	barrier;
++	uint32	core;
++} sbmemcregs_t;
 +
++#endif
 +
-+/* DMA structure:
-+ *  support two DMA engines: 32 bits address or 64 bit addressing
-+ *  basic DMA register set is per channel(transmit or receive)
-+ *  a pair of channels is defined for convenience
-+ */
++/* MEMC Core Init values (OCP ID 0x80f) */
 +
++/* For sdr: */
++#define MEMC_SD_CONFIG_INIT	0x00048000
++#define MEMC_SD_DRAMTIM2_INIT	0x000754d8
++#define MEMC_SD_DRAMTIM3_INIT	0x000754da
++#define MEMC_SD_RDNCDLCOR_INIT	0x00000000
++#define MEMC_SD_WRNCDLCOR_INIT	0x49351200
++#define MEMC_SD1_WRNCDLCOR_INIT	0x14500200	/* For corerev 1 (4712) */
++#define MEMC_SD_MISCDLYCTL_INIT	0x00061c1b
++#define MEMC_SD1_MISCDLYCTL_INIT 0x00021416	/* For corerev 1 (4712) */
++#define MEMC_SD_CONTROL_INIT0	0x00000002
++#define MEMC_SD_CONTROL_INIT1	0x00000008
++#define MEMC_SD_CONTROL_INIT2	0x00000004
++#define MEMC_SD_CONTROL_INIT3	0x00000010
++#define MEMC_SD_CONTROL_INIT4	0x00000001
++#define MEMC_SD_MODEBUF_INIT	0x00000000
++#define MEMC_SD_REFRESH_INIT	0x0000840f
 +
-+/*** 32 bits addressing ***/ 
 +
-+/* dma registers per channel(xmt or rcv) */
-+typedef volatile struct {
-+	uint32	control;		/* enable, et al */
-+	uint32	addr;			/* descriptor ring base address (4K aligned) */
-+	uint32	ptr;			/* last descriptor posted to chip */
-+	uint32	status;			/* current active descriptor, et al */
-+} dma32regs_t;
++/* This is for SDRM8X8X4 */
++#define	MEMC_SDR_INIT		0x0008
++#define	MEMC_SDR_MODE		0x32
++#define	MEMC_SDR_NCDL		0x00020032
++#define	MEMC_SDR1_NCDL		0x0002020f	/* For corerev 1 (4712) */
 +
-+typedef volatile struct {
-+	dma32regs_t	xmt;		/* dma tx channel */
-+	dma32regs_t	rcv;		/* dma rx channel */
-+} dma32regp_t;
++/* For ddr: */
++#define MEMC_CONFIG_INIT	0x00048000
++#define MEMC_DRAMTIM2_INIT	0x000754d8
++#define MEMC_DRAMTIM25_INIT	0x000754d9
++#define MEMC_RDNCDLCOR_INIT	0x00000000
++#define MEMC_RDNCDLCOR_SIMINIT	0xf6f6f6f6	/* For hdl sim */
++#define MEMC_WRNCDLCOR_INIT	0x49351200
++#define MEMC_1_WRNCDLCOR_INIT	0x14500200
++#define MEMC_DQSGATENCDL_INIT	0x00030000
++#define MEMC_MISCDLYCTL_INIT	0x21061c1b
++#define MEMC_1_MISCDLYCTL_INIT	0x21021400
++#define MEMC_NCDLCTL_INIT	0x00002001
++#define MEMC_CONTROL_INIT0	0x00000002
++#define MEMC_CONTROL_INIT1	0x00000008
++#define MEMC_MODEBUF_INIT0	0x00004000
++#define MEMC_CONTROL_INIT2	0x00000010
++#define MEMC_MODEBUF_INIT1	0x00000100
++#define MEMC_CONTROL_INIT3	0x00000010
++#define MEMC_CONTROL_INIT4	0x00000008
++#define MEMC_REFRESH_INIT	0x0000840f
++#define MEMC_CONTROL_INIT5	0x00000004
++#define MEMC_MODEBUF_INIT2	0x00000000
++#define MEMC_CONTROL_INIT6	0x00000010
++#define MEMC_CONTROL_INIT7	0x00000001
 +
-+typedef volatile struct {	/* diag access */
-+	uint32	fifoaddr;		/* diag address */
-+	uint32	fifodatalow;		/* low 32bits of data */
-+	uint32	fifodatahigh;		/* high 32bits of data */
-+	uint32	pad;			/* reserved */
-+} dma32diag_t;
 +
-+/*
-+ * DMA Descriptor
-+ * Descriptors are only read by the hardware, never written back.
-+ */
-+typedef volatile struct {
-+	uint32	ctrl;		/* misc control bits & bufcount */
-+	uint32	addr;		/* data buffer address */
-+} dma32dd_t;
++/* This is for DDRM16X16X2 */
++#define	MEMC_DDR_INIT		0x0009
++#define	MEMC_DDR_MODE		0x62
++#define	MEMC_DDR_NCDL		0x0005050a
++#define	MEMC_DDR1_NCDL		0x00000a0a	/* For corerev 1 (4712) */
 +
-+/*
-+ * Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page.
-+ */
-+#define	D32MAXRINGSZ	4096
-+#define	D32RINGALIGN	4096
-+#define	D32MAXDD	(D32MAXRINGSZ / sizeof (dma32dd_t))
++/* mask for sdr/ddr calibration registers */
++#define MEMC_RDNCDLCOR_RD_MASK	0x000000ff
++#define MEMC_WRNCDLCOR_WR_MASK	0x000000ff
++#define MEMC_DQSGATENCDL_G_MASK	0x000000ff
 +
-+/* transmit channel control */
-+#define	XC_XE		((uint32)1 << 0)	/* transmit enable */
-+#define	XC_SE		((uint32)1 << 1)	/* transmit suspend request */
-+#define	XC_LE		((uint32)1 << 2)	/* loopback enable */
-+#define	XC_FL		((uint32)1 << 4)	/* flush request */
-+#define	XC_AE		((uint32)3 << 16)	/* address extension bits */
-+#define	XC_AE_SHIFT	16
++/* masks for miscdlyctl registers */
++#define MEMC_MISC_SM_MASK	0x30000000
++#define MEMC_MISC_SM_SHIFT	28
++#define MEMC_MISC_SD_MASK	0x0f000000
++#define MEMC_MISC_SD_SHIFT	24
 +
-+/* transmit descriptor table pointer */
-+#define	XP_LD_MASK	0xfff			/* last valid descriptor */
++/* hw threshhold for calculating wr/rd for sdr memc */
++#define MEMC_CD_THRESHOLD	128
 +
-+/* transmit channel status */
-+#define	XS_CD_MASK	0x0fff			/* current descriptor pointer */
-+#define	XS_XS_MASK	0xf000			/* transmit state */
-+#define	XS_XS_SHIFT	12
-+#define	XS_XS_DISABLED	0x0000			/* disabled */
-+#define	XS_XS_ACTIVE	0x1000			/* active */
-+#define	XS_XS_IDLE	0x2000			/* idle wait */
-+#define	XS_XS_STOPPED	0x3000			/* stopped */
-+#define	XS_XS_SUSP	0x4000			/* suspend pending */
-+#define	XS_XE_MASK	0xf0000			/* transmit errors */
-+#define	XS_XE_SHIFT	16
-+#define	XS_XE_NOERR	0x00000			/* no error */
-+#define	XS_XE_DPE	0x10000			/* descriptor protocol error */
-+#define	XS_XE_DFU	0x20000			/* data fifo underrun */
-+#define	XS_XE_BEBR	0x30000			/* bus error on buffer read */
-+#define	XS_XE_BEDA	0x40000			/* bus error on descriptor access */
-+#define	XS_AD_MASK	0xfff00000		/* active descriptor */
-+#define	XS_AD_SHIFT	20
++/* Low bit of init register says if memc is ddr or sdr */
++#define MEMC_CONFIG_DDR		0x00000001
 +
-+/* receive channel control */
-+#define	RC_RE		((uint32)1 << 0)	/* receive enable */
-+#define	RC_RO_MASK	0xfe			/* receive frame offset */
-+#define	RC_RO_SHIFT	1
-+#define	RC_FM		((uint32)1 << 8)	/* direct fifo receive (pio) mode */
-+#define	RC_AE		((uint32)3 << 16)	/* address extension bits */
-+#define	RC_AE_SHIFT	16
++#endif	/* _SBMEMC_H */
+diff -Naur linux.old/arch/mips/bcm947xx/include/sbmips.h linux.dev/arch/mips/bcm947xx/include/sbmips.h
+--- linux.old/arch/mips/bcm947xx/include/sbmips.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/sbmips.h	2006-04-06 15:34:14.000000000 +0200
+@@ -0,0 +1,62 @@
++/*
++ * Broadcom SiliconBackplane MIPS definitions
++ *
++ * SB MIPS cores are custom MIPS32 processors with SiliconBackplane
++ * OCP interfaces. The CP0 processor ID is 0x00024000, where bits
++ * 23:16 mean Broadcom and bits 15:8 mean a MIPS core with an OCP
++ * interface. The core revision is stored in the SB ID register in SB
++ * configuration space.
++ *
++ * Copyright 2005, Broadcom Corporation
++ * All Rights Reserved.
++ * 
++ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
++ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
++ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
++ *
++ * $Id$
++ */
 +
-+/* receive descriptor table pointer */
-+#define	RP_LD_MASK	0xfff			/* last valid descriptor */
++#ifndef	_SBMIPS_H
++#define	_SBMIPS_H
 +
-+/* receive channel status */
-+#define	RS_CD_MASK	0x0fff			/* current descriptor pointer */
-+#define	RS_RS_MASK	0xf000			/* receive state */
-+#define	RS_RS_SHIFT	12
-+#define	RS_RS_DISABLED	0x0000			/* disabled */
-+#define	RS_RS_ACTIVE	0x1000			/* active */
-+#define	RS_RS_IDLE	0x2000			/* idle wait */
-+#define	RS_RS_STOPPED	0x3000			/* reserved */
-+#define	RS_RE_MASK	0xf0000			/* receive errors */
-+#define	RS_RE_SHIFT	16
-+#define	RS_RE_NOERR	0x00000			/* no error */
-+#define	RS_RE_DPE	0x10000			/* descriptor protocol error */
-+#define	RS_RE_DFO	0x20000			/* data fifo overflow */
-+#define	RS_RE_BEBW	0x30000			/* bus error on buffer write */
-+#define	RS_RE_BEDA	0x40000			/* bus error on descriptor access */
-+#define	RS_AD_MASK	0xfff00000		/* active descriptor */
-+#define	RS_AD_SHIFT	20
++#include <mipsinc.h>
 +
-+/* fifoaddr */
-+#define	FA_OFF_MASK	0xffff			/* offset */
-+#define	FA_SEL_MASK	0xf0000			/* select */
-+#define	FA_SEL_SHIFT	16
-+#define	FA_SEL_XDD	0x00000			/* transmit dma data */
-+#define	FA_SEL_XDP	0x10000			/* transmit dma pointers */
-+#define	FA_SEL_RDD	0x40000			/* receive dma data */
-+#define	FA_SEL_RDP	0x50000			/* receive dma pointers */
-+#define	FA_SEL_XFD	0x80000			/* transmit fifo data */
-+#define	FA_SEL_XFP	0x90000			/* transmit fifo pointers */
-+#define	FA_SEL_RFD	0xc0000			/* receive fifo data */
-+#define	FA_SEL_RFP	0xd0000			/* receive fifo pointers */
-+#define	FA_SEL_RSD	0xe0000			/* receive frame status data */
-+#define	FA_SEL_RSP	0xf0000			/* receive frame status pointers */
++#ifndef _LANGUAGE_ASSEMBLY
 +
-+/* descriptor control flags */
-+#define	CTRL_BC_MASK	0x1fff			/* buffer byte count */
-+#define	CTRL_AE		((uint32)3 << 16)	/* address extension bits */
-+#define	CTRL_AE_SHIFT	16
-+#define	CTRL_EOT	((uint32)1 << 28)	/* end of descriptor table */
-+#define	CTRL_IOC	((uint32)1 << 29)	/* interrupt on completion */
-+#define	CTRL_EOF	((uint32)1 << 30)	/* end of frame */
-+#define	CTRL_SOF	((uint32)1 << 31)	/* start of frame */
++/* cpp contortions to concatenate w/arg prescan */
++#ifndef PAD
++#define	_PADLINE(line)	pad ## line
++#define	_XSTR(line)	_PADLINE(line)
++#define	PAD		_XSTR(__LINE__)
++#endif	/* PAD */
 +
-+/* control flags in the range [27:20] are core-specific and not defined here */
-+#define	CTRL_CORE_MASK	0x0ff00000
++typedef volatile struct {
++	uint32	corecontrol;
++	uint32	PAD[2];
++	uint32	biststatus;
++	uint32	PAD[4];
++	uint32	intstatus;
++	uint32	intmask;
++	uint32	timer;
++} mipsregs_t;
 +
-+/*** 64 bits addressing ***/
++extern uint32 sb_flag(sb_t *sbh);
++extern uint sb_irq(sb_t *sbh);
 +
-+/* dma registers per channel(xmt or rcv) */
-+typedef volatile struct {
-+	uint32	control;		/* enable, et al */
-+	uint32	ptr;			/* last descriptor posted to chip */
-+	uint32	addrlow;		/* descriptor ring base address low 32-bits (8K aligned) */
-+	uint32	addrhigh;		/* descriptor ring base address bits 63:32 (8K aligned) */
-+	uint32	status0;		/* current descriptor, xmt state */
-+	uint32	status1;		/* active descriptor, xmt error */
-+} dma64regs_t;
++extern void BCMINIT(sb_serial_init)(sb_t *sbh, void (*add)(void *regs, uint irq, uint baud_base, uint reg_shift));
 +
-+typedef volatile struct {
-+	dma64regs_t	tx;		/* dma64 tx channel */
-+	dma64regs_t	rx;		/* dma64 rx channel */
-+} dma64regp_t;
++extern void *sb_jtagm_init(sb_t *sbh, uint clkd, bool exttap);
++extern void sb_jtagm_disable(void *h);
++extern uint32 jtag_rwreg(void *h, uint32 ir, uint32 dr);
++extern void BCMINIT(sb_mips_init)(sb_t *sbh);
++extern uint32 BCMINIT(sb_mips_clock)(sb_t *sbh);
++extern bool BCMINIT(sb_mips_setclock)(sb_t *sbh, uint32 mipsclock, uint32 sbclock, uint32 pciclock);
++extern void BCMINIT(enable_pfc)(uint32 mode);
++extern uint32 BCMINIT(sb_memc_get_ncdl)(sb_t *sbh);
 +
-+typedef volatile struct {		/* diag access */
-+	uint32	fifoaddr;		/* diag address */
-+	uint32	fifodatalow;		/* low 32bits of data */
-+	uint32	fifodatahigh;		/* high 32bits of data */
-+	uint32	pad;			/* reserved */
-+} dma64diag_t;
 +
++#endif /* _LANGUAGE_ASSEMBLY */
++
++#endif	/* _SBMIPS_H */
+diff -Naur linux.old/arch/mips/bcm947xx/include/sbpci.h linux.dev/arch/mips/bcm947xx/include/sbpci.h
+--- linux.old/arch/mips/bcm947xx/include/sbpci.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/sbpci.h	2006-04-06 15:34:14.000000000 +0200
+@@ -0,0 +1,122 @@
 +/*
-+ * DMA Descriptor
-+ * Descriptors are only read by the hardware, never written back.
-+ */
-+typedef volatile struct {
-+	uint32	ctrl1;		/* misc control bits & bufcount */
-+	uint32	ctrl2;		/* buffer count and address extension */
-+	uint32	addrlow;	/* memory address of the first byte of the date buffer, bits 31:0 */
-+	uint32	addrhigh;	/* memory address of the first byte of the date buffer, bits 63:32 */
-+} dma64dd_t;
-+
-+/*
-+ * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical addresss.
++ * BCM47XX Sonics SiliconBackplane PCI core hardware definitions.
++ *
++ * $Id$
++ * Copyright 2005, Broadcom Corporation      
++ * All Rights Reserved.      
++ *       
++ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY      
++ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM      
++ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS      
++ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
 + */
-+#define	D64MAXRINGSZ	8192
-+#define	D64RINGALIGN	8192
-+#define	D64MAXDD	(D64MAXRINGSZ / sizeof (dma64dd_t))
 +
-+/* transmit channel control */
-+#define	D64_XC_XE		0x00000001	/* transmit enable */
-+#define	D64_XC_SE		0x00000002	/* transmit suspend request */
-+#define	D64_XC_LE		0x00000004	/* loopback enable */
-+#define	D64_XC_FL		0x00000010	/* flush request */
-+#define	D64_XC_AE		0x00110000	/* address extension bits */
-+#define	D64_XC_AE_SHIFT		16
++#ifndef	_SBPCI_H
++#define	_SBPCI_H
 +
-+/* transmit descriptor table pointer */
-+#define	D64_XP_LD_MASK		0x00000fff	/* last valid descriptor */
++/* cpp contortions to concatenate w/arg prescan */
++#ifndef PAD
++#define	_PADLINE(line)	pad ## line
++#define	_XSTR(line)	_PADLINE(line)
++#define	PAD		_XSTR(__LINE__)
++#endif
 +
-+/* transmit channel status */
-+#define	D64_XS0_CD_MASK		0x00001fff	/* current descriptor pointer */
-+#define	D64_XS0_XS_MASK		0xf0000000     	/* transmit state */
-+#define	D64_XS0_XS_SHIFT		28
-+#define	D64_XS0_XS_DISABLED	0x00000000	/* disabled */
-+#define	D64_XS0_XS_ACTIVE	0x10000000	/* active */
-+#define	D64_XS0_XS_IDLE		0x20000000	/* idle wait */
-+#define	D64_XS0_XS_STOPPED	0x30000000	/* stopped */
-+#define	D64_XS0_XS_SUSP		0x40000000	/* suspend pending */
++/* Sonics side: PCI core and host control registers */
++typedef struct sbpciregs {
++	uint32 control;		/* PCI control */
++	uint32 PAD[3];
++	uint32 arbcontrol;	/* PCI arbiter control */
++	uint32 PAD[3];
++	uint32 intstatus;	/* Interrupt status */
++	uint32 intmask;		/* Interrupt mask */
++	uint32 sbtopcimailbox;	/* Sonics to PCI mailbox */
++	uint32 PAD[9];
++	uint32 bcastaddr;	/* Sonics broadcast address */
++	uint32 bcastdata;	/* Sonics broadcast data */
++	uint32 PAD[2];
++	uint32 gpioin;		/* ro: gpio input (>=rev2) */
++	uint32 gpioout;		/* rw: gpio output (>=rev2) */
++	uint32 gpioouten;	/* rw: gpio output enable (>= rev2) */
++	uint32 gpiocontrol;	/* rw: gpio control (>= rev2) */
++	uint32 PAD[36];
++	uint32 sbtopci0;	/* Sonics to PCI translation 0 */
++	uint32 sbtopci1;	/* Sonics to PCI translation 1 */
++	uint32 sbtopci2;	/* Sonics to PCI translation 2 */
++	uint32 PAD[445];
++	uint16 sprom[36];	/* SPROM shadow Area */
++	uint32 PAD[46];
++} sbpciregs_t;
 +
-+#define	D64_XS1_AD_MASK		0x0001ffff	/* active descriptor */
-+#define	D64_XS1_XE_MASK		0xf0000000     	/* transmit errors */
-+#define	D64_XS1_XE_SHIFT		28
-+#define	D64_XS1_XE_NOERR	0x00000000	/* no error */
-+#define	D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
-+#define	D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
-+#define	D64_XS1_XE_DTE		0x30000000	/* data transfer error */
-+#define	D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
-+#define	D64_XS1_XE_COREE	0x50000000	/* core error */
++/* PCI control */
++#define PCI_RST_OE	0x01	/* When set, drives PCI_RESET out to pin */
++#define PCI_RST		0x02	/* Value driven out to pin */
++#define PCI_CLK_OE	0x04	/* When set, drives clock as gated by PCI_CLK out to pin */
++#define PCI_CLK		0x08	/* Gate for clock driven out to pin */	
 +
-+/* receive channel control */
-+#define	D64_RC_RE		0x00000001	/* receive enable */
-+#define	D64_RC_RO_MASK		0x000000fe	/* receive frame offset */
-+#define	D64_RC_RO_SHIFT		1
-+#define	D64_RC_FM		0x00000100	/* direct fifo receive (pio) mode */
-+#define	D64_RC_AE		0x00110000	/* address extension bits */
-+#define	D64_RC_AE_SHIFT		16
++/* PCI arbiter control */
++#define PCI_INT_ARB	0x01	/* When set, use an internal arbiter */
++#define PCI_EXT_ARB	0x02	/* When set, use an external arbiter */
++#define PCI_PARKID_MASK	0x06	/* Selects which agent is parked on an idle bus */
++#define PCI_PARKID_SHIFT   1
++#define PCI_PARKID_LAST	   0	/* Last requestor */
++#define PCI_PARKID_4710	   1	/* 4710 */
++#define PCI_PARKID_EXTREQ0 2	/* External requestor 0 */
++#define PCI_PARKID_EXTREQ1 3	/* External requestor 1 */
 +
-+/* receive descriptor table pointer */
-+#define	D64_RP_LD_MASK		0x00000fff	/* last valid descriptor */
++/* Interrupt status/mask */
++#define PCI_INTA	0x01	/* PCI INTA# is asserted */
++#define PCI_INTB	0x02	/* PCI INTB# is asserted */
++#define PCI_SERR	0x04	/* PCI SERR# has been asserted (write one to clear) */
++#define PCI_PERR	0x08	/* PCI PERR# has been asserted (write one to clear) */
++#define PCI_PME		0x10	/* PCI PME# is asserted */
 +
-+/* receive channel status */
-+#define	D64_RS0_CD_MASK		0x00001fff	/* current descriptor pointer */
-+#define	D64_RS0_RS_MASK		0xf0000000     	/* receive state */
-+#define	D64_RS0_RS_SHIFT		28
-+#define	D64_RS0_RS_DISABLED	0x00000000	/* disabled */
-+#define	D64_RS0_RS_ACTIVE	0x10000000	/* active */
-+#define	D64_RS0_RS_IDLE		0x20000000	/* idle wait */
-+#define	D64_RS0_RS_STOPPED	0x30000000	/* stopped */
-+#define	D64_RS0_RS_SUSP		0x40000000	/* suspend pending */
++/* (General) PCI/SB mailbox interrupts, two bits per pci function */
++#define	MAILBOX_F0_0	0x100	/* function 0, int 0 */
++#define	MAILBOX_F0_1	0x200	/* function 0, int 1 */
++#define	MAILBOX_F1_0	0x400	/* function 1, int 0 */
++#define	MAILBOX_F1_1	0x800	/* function 1, int 1 */
++#define	MAILBOX_F2_0	0x1000	/* function 2, int 0 */
++#define	MAILBOX_F2_1	0x2000	/* function 2, int 1 */
++#define	MAILBOX_F3_0	0x4000	/* function 3, int 0 */
++#define	MAILBOX_F3_1	0x8000	/* function 3, int 1 */
 +
-+#define	D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
-+#define	D64_RS1_RE_MASK		0xf0000000     	/* receive errors */
-+#define	D64_RS1_RE_SHIFT		28
-+#define	D64_RS1_RE_NOERR	0x00000000	/* no error */
-+#define	D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
-+#define	D64_RS1_RE_DFU		0x20000000	/* data fifo overflow */
-+#define	D64_RS1_RE_DTE		0x30000000	/* data transfer error */
-+#define	D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
-+#define	D64_RS1_RE_COREE	0x50000000	/* core error */
++/* Sonics broadcast address */
++#define BCAST_ADDR_MASK	0xff	/* Broadcast register address */
 +
-+/* fifoaddr */
-+#define	D64_FA_OFF_MASK		0xffff		/* offset */
-+#define	D64_FA_SEL_MASK		0xf0000		/* select */
-+#define	D64_FA_SEL_SHIFT	16
-+#define	D64_FA_SEL_XDD		0x00000		/* transmit dma data */
-+#define	D64_FA_SEL_XDP		0x10000		/* transmit dma pointers */
-+#define	D64_FA_SEL_RDD		0x40000		/* receive dma data */
-+#define	D64_FA_SEL_RDP		0x50000		/* receive dma pointers */
-+#define	D64_FA_SEL_XFD		0x80000		/* transmit fifo data */
-+#define	D64_FA_SEL_XFP		0x90000		/* transmit fifo pointers */
-+#define	D64_FA_SEL_RFD		0xc0000		/* receive fifo data */
-+#define	D64_FA_SEL_RFP		0xd0000		/* receive fifo pointers */
-+#define	D64_FA_SEL_RSD		0xe0000		/* receive frame status data */
-+#define	D64_FA_SEL_RSP		0xf0000		/* receive frame status pointers */
++/* Sonics to PCI translation types */
++#define SBTOPCI0_MASK	0xfc000000
++#define SBTOPCI1_MASK	0xfc000000
++#define SBTOPCI2_MASK	0xc0000000
++#define SBTOPCI_MEM	0
++#define SBTOPCI_IO	1
++#define SBTOPCI_CFG0	2
++#define SBTOPCI_CFG1	3
++#define	SBTOPCI_PREF	0x4		/* prefetch enable */
++#define	SBTOPCI_BURST	0x8		/* burst enable */
++#define	SBTOPCI_RC_MASK		0x30	/* read command (>= rev11) */
++#define	SBTOPCI_RC_READ		0x00	/* memory read */
++#define	SBTOPCI_RC_READLINE	0x10	/* memory read line */
++#define	SBTOPCI_RC_READMULTI	0x20	/* memory read multiple */
 +
-+/* descriptor control flags 1 */
-+#define	D64_CTRL1_EOT		((uint32)1 << 28)	/* end of descriptor table */
-+#define	D64_CTRL1_IOC		((uint32)1 << 29)	/* interrupt on completion */
-+#define	D64_CTRL1_EOF		((uint32)1 << 30)	/* end of frame */
-+#define	D64_CTRL1_SOF		((uint32)1 << 31)	/* start of frame */
++/* PCI core index in SROM shadow area */
++#define SRSH_PI_OFFSET	0	/* first word */
++#define SRSH_PI_MASK	0xf000	/* bit 15:12 */
++#define SRSH_PI_SHIFT	12	/* bit 15:12 */
 +
-+/* descriptor control flags 2 */
-+#define	D64_CTRL2_BC_MASK	0x00007fff	/* buffer byte count mask */
-+#define	D64_CTRL2_AE		0x00110000	/* address extension bits */
-+#define	D64_CTRL2_AE_SHIFT	16
++/* PCI side: Reserved PCI configuration registers (see pcicfg.h) */
++#define cap_list	rsvd_a[0]
++#define bar0_window	dev_dep[0x80 - 0x40]
++#define bar1_window	dev_dep[0x84 - 0x40]
++#define sprom_control	dev_dep[0x88 - 0x40]
 +
-+/* control flags in the range [27:20] are core-specific and not defined here */
-+#define	D64_CTRL_CORE_MASK	0x0ff00000
++#ifndef _LANGUAGE_ASSEMBLY
++
++extern int sbpci_read_config(sb_t *sbh, uint bus, uint dev, uint func, uint off, void *buf, int len);
++extern int sbpci_write_config(sb_t *sbh, uint bus, uint dev, uint func, uint off, void *buf, int len);
++extern void sbpci_ban(uint16 core);
++extern int sbpci_init(sb_t *sbh);
++extern void sbpci_check(sb_t *sbh);
 +
++#endif /* !_LANGUAGE_ASSEMBLY */
 +
-+#endif	/* _sbhnddma_h_ */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbmemc.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbmemc.h
---- linux-2.4.32/arch/mips/bcm947xx/include/sbmemc.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbmemc.h	2005-12-16 23:39:10.932836000 +0100
-@@ -0,0 +1,148 @@
++#endif	/* _SBPCI_H */
+diff -Naur linux.old/arch/mips/bcm947xx/include/sbpcie.h linux.dev/arch/mips/bcm947xx/include/sbpcie.h
+--- linux.old/arch/mips/bcm947xx/include/sbpcie.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/sbpcie.h	2006-04-06 15:34:14.000000000 +0200
+@@ -0,0 +1,199 @@
 +/*
-+ * BCM47XX Sonics SiliconBackplane DDR/SDRAM controller core hardware definitions.
++ * BCM43XX SiliconBackplane PCIE core hardware definitions.
 + *
++ * $Id: 
 + * Copyright 2005, Broadcom Corporation      
 + * All Rights Reserved.      
 + *       
@@ -4991,254 +4969,35 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbmemc.h linux-2.4.32-brcm/arc
 + * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM      
 + * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS      
 + * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
-+ *
-+ * $Id$
 + */
 +
-+#ifndef	_SBMEMC_H
-+#define	_SBMEMC_H
++#ifndef	_SBPCIE_H
++#define	_SBPCIE_H
 +
-+#ifdef _LANGUAGE_ASSEMBLY
++/* cpp contortions to concatenate w/arg prescan */
++#ifndef PAD
++#define	_PADLINE(line)	pad ## line
++#define	_XSTR(line)	_PADLINE(line)
++#define	PAD		_XSTR(__LINE__)
++#endif
 +
-+#define	MEMC_CONTROL		0x00
-+#define	MEMC_CONFIG		0x04
-+#define	MEMC_REFRESH		0x08
-+#define	MEMC_BISTSTAT		0x0c
-+#define	MEMC_MODEBUF		0x10
-+#define	MEMC_BKCLS		0x14
-+#define	MEMC_PRIORINV		0x18
-+#define	MEMC_DRAMTIM		0x1c
-+#define	MEMC_INTSTAT		0x20
-+#define	MEMC_INTMASK		0x24
-+#define	MEMC_INTINFO		0x28
-+#define	MEMC_NCDLCTL		0x30
-+#define	MEMC_RDNCDLCOR		0x34
-+#define	MEMC_WRNCDLCOR		0x38
-+#define	MEMC_MISCDLYCTL		0x3c
-+#define	MEMC_DQSGATENCDL	0x40
-+#define	MEMC_SPARE		0x44
-+#define	MEMC_TPADDR		0x48
-+#define	MEMC_TPDATA		0x4c
-+#define	MEMC_BARRIER		0x50
-+#define	MEMC_CORE		0x54
++/* PCIE Enumeration space offsets*/
++#define  PCIE_CORE_CONFIG_OFFSET	0x0
++#define  PCIE_FUNC0_CONFIG_OFFSET	0x400
++#define  PCIE_FUNC1_CONFIG_OFFSET	0x500
++#define  PCIE_FUNC2_CONFIG_OFFSET	0x600
++#define  PCIE_FUNC3_CONFIG_OFFSET	0x700
++#define  PCIE_SPROM_SHADOW_OFFSET	0x800
++#define  PCIE_SBCONFIG_OFFSET		0xE00	
 +
++/* PCIE Bar0 Address Mapping. Each function maps 16KB config space */
++#define PCIE_BAR0_WINMAPCORE_OFFSET	0x0
++#define PCIE_BAR0_EXTSPROM_OFFSET	0x1000
++#define PCIE_BAR0_PCIECORE_OFFSET	0x2000
++#define PCIE_BAR0_CCCOREREG_OFFSET	0x3000
 +
-+#else
-+
-+/* Sonics side: MEMC core registers */
-+typedef volatile struct sbmemcregs {
-+	uint32	control;
-+	uint32	config;
-+	uint32	refresh;
-+	uint32	biststat;
-+	uint32	modebuf;
-+	uint32	bkcls;
-+	uint32	priorinv;
-+	uint32	dramtim;
-+	uint32	intstat;
-+	uint32	intmask;
-+	uint32	intinfo;
-+	uint32	reserved1;
-+	uint32	ncdlctl;
-+	uint32	rdncdlcor;
-+	uint32	wrncdlcor;
-+	uint32	miscdlyctl;
-+	uint32	dqsgatencdl;
-+	uint32	spare;
-+	uint32	tpaddr;
-+	uint32	tpdata;
-+	uint32	barrier;
-+	uint32	core;
-+} sbmemcregs_t;
-+
-+#endif
-+
-+/* MEMC Core Init values (OCP ID 0x80f) */
-+
-+/* For sdr: */
-+#define MEMC_SD_CONFIG_INIT	0x00048000
-+#define MEMC_SD_DRAMTIM2_INIT	0x000754d8
-+#define MEMC_SD_DRAMTIM3_INIT	0x000754da
-+#define MEMC_SD_RDNCDLCOR_INIT	0x00000000
-+#define MEMC_SD_WRNCDLCOR_INIT	0x49351200
-+#define MEMC_SD1_WRNCDLCOR_INIT	0x14500200	/* For corerev 1 (4712) */
-+#define MEMC_SD_MISCDLYCTL_INIT	0x00061c1b
-+#define MEMC_SD1_MISCDLYCTL_INIT 0x00021416	/* For corerev 1 (4712) */
-+#define MEMC_SD_CONTROL_INIT0	0x00000002
-+#define MEMC_SD_CONTROL_INIT1	0x00000008
-+#define MEMC_SD_CONTROL_INIT2	0x00000004
-+#define MEMC_SD_CONTROL_INIT3	0x00000010
-+#define MEMC_SD_CONTROL_INIT4	0x00000001
-+#define MEMC_SD_MODEBUF_INIT	0x00000000
-+#define MEMC_SD_REFRESH_INIT	0x0000840f
-+
-+
-+/* This is for SDRM8X8X4 */
-+#define	MEMC_SDR_INIT		0x0008
-+#define	MEMC_SDR_MODE		0x32
-+#define	MEMC_SDR_NCDL		0x00020032
-+#define	MEMC_SDR1_NCDL		0x0002020f	/* For corerev 1 (4712) */
-+
-+/* For ddr: */
-+#define MEMC_CONFIG_INIT	0x00048000
-+#define MEMC_DRAMTIM2_INIT	0x000754d8
-+#define MEMC_DRAMTIM25_INIT	0x000754d9
-+#define MEMC_RDNCDLCOR_INIT	0x00000000
-+#define MEMC_RDNCDLCOR_SIMINIT	0xf6f6f6f6	/* For hdl sim */
-+#define MEMC_WRNCDLCOR_INIT	0x49351200
-+#define MEMC_1_WRNCDLCOR_INIT	0x14500200
-+#define MEMC_DQSGATENCDL_INIT	0x00030000
-+#define MEMC_MISCDLYCTL_INIT	0x21061c1b
-+#define MEMC_1_MISCDLYCTL_INIT	0x21021400
-+#define MEMC_NCDLCTL_INIT	0x00002001
-+#define MEMC_CONTROL_INIT0	0x00000002
-+#define MEMC_CONTROL_INIT1	0x00000008
-+#define MEMC_MODEBUF_INIT0	0x00004000
-+#define MEMC_CONTROL_INIT2	0x00000010
-+#define MEMC_MODEBUF_INIT1	0x00000100
-+#define MEMC_CONTROL_INIT3	0x00000010
-+#define MEMC_CONTROL_INIT4	0x00000008
-+#define MEMC_REFRESH_INIT	0x0000840f
-+#define MEMC_CONTROL_INIT5	0x00000004
-+#define MEMC_MODEBUF_INIT2	0x00000000
-+#define MEMC_CONTROL_INIT6	0x00000010
-+#define MEMC_CONTROL_INIT7	0x00000001
-+
-+
-+/* This is for DDRM16X16X2 */
-+#define	MEMC_DDR_INIT		0x0009
-+#define	MEMC_DDR_MODE		0x62
-+#define	MEMC_DDR_NCDL		0x0005050a
-+#define	MEMC_DDR1_NCDL		0x00000a0a	/* For corerev 1 (4712) */
-+
-+/* mask for sdr/ddr calibration registers */
-+#define MEMC_RDNCDLCOR_RD_MASK	0x000000ff
-+#define MEMC_WRNCDLCOR_WR_MASK	0x000000ff
-+#define MEMC_DQSGATENCDL_G_MASK	0x000000ff
-+
-+/* masks for miscdlyctl registers */
-+#define MEMC_MISC_SM_MASK	0x30000000
-+#define MEMC_MISC_SM_SHIFT	28
-+#define MEMC_MISC_SD_MASK	0x0f000000
-+#define MEMC_MISC_SD_SHIFT	24
-+
-+/* hw threshhold for calculating wr/rd for sdr memc */
-+#define MEMC_CD_THRESHOLD	128
-+
-+/* Low bit of init register says if memc is ddr or sdr */
-+#define MEMC_CONFIG_DDR		0x00000001
-+
-+#endif	/* _SBMEMC_H */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbmips.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbmips.h
---- linux-2.4.32/arch/mips/bcm947xx/include/sbmips.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbmips.h	2005-12-16 23:39:10.936836250 +0100
-@@ -0,0 +1,62 @@
-+/*
-+ * Broadcom SiliconBackplane MIPS definitions
-+ *
-+ * SB MIPS cores are custom MIPS32 processors with SiliconBackplane
-+ * OCP interfaces. The CP0 processor ID is 0x00024000, where bits
-+ * 23:16 mean Broadcom and bits 15:8 mean a MIPS core with an OCP
-+ * interface. The core revision is stored in the SB ID register in SB
-+ * configuration space.
-+ *
-+ * Copyright 2005, Broadcom Corporation
-+ * All Rights Reserved.
-+ * 
-+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
-+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
-+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
-+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
-+ *
-+ * $Id$
-+ */
-+
-+#ifndef	_SBMIPS_H
-+#define	_SBMIPS_H
-+
-+#include <mipsinc.h>
-+
-+#ifndef _LANGUAGE_ASSEMBLY
-+
-+/* cpp contortions to concatenate w/arg prescan */
-+#ifndef PAD
-+#define	_PADLINE(line)	pad ## line
-+#define	_XSTR(line)	_PADLINE(line)
-+#define	PAD		_XSTR(__LINE__)
-+#endif	/* PAD */
-+
-+typedef volatile struct {
-+	uint32	corecontrol;
-+	uint32	PAD[2];
-+	uint32	biststatus;
-+	uint32	PAD[4];
-+	uint32	intstatus;
-+	uint32	intmask;
-+	uint32	timer;
-+} mipsregs_t;
-+
-+extern uint32 sb_flag(sb_t *sbh);
-+extern uint sb_irq(sb_t *sbh);
-+
-+extern void BCMINIT(sb_serial_init)(sb_t *sbh, void (*add)(void *regs, uint irq, uint baud_base, uint reg_shift));
-+
-+extern void *sb_jtagm_init(sb_t *sbh, uint clkd, bool exttap);
-+extern void sb_jtagm_disable(void *h);
-+extern uint32 jtag_rwreg(void *h, uint32 ir, uint32 dr);
-+extern void BCMINIT(sb_mips_init)(sb_t *sbh);
-+extern uint32 BCMINIT(sb_mips_clock)(sb_t *sbh);
-+extern bool BCMINIT(sb_mips_setclock)(sb_t *sbh, uint32 mipsclock, uint32 sbclock, uint32 pciclock);
-+extern void BCMINIT(enable_pfc)(uint32 mode);
-+extern uint32 BCMINIT(sb_memc_get_ncdl)(sb_t *sbh);
-+
-+
-+#endif /* _LANGUAGE_ASSEMBLY */
-+
-+#endif	/* _SBMIPS_H */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbpcie.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbpcie.h
---- linux-2.4.32/arch/mips/bcm947xx/include/sbpcie.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbpcie.h	2005-12-16 23:39:10.936836250 +0100
-@@ -0,0 +1,199 @@
-+/*
-+ * BCM43XX SiliconBackplane PCIE core hardware definitions.
-+ *
-+ * $Id: 
-+ * Copyright 2005, Broadcom Corporation      
-+ * All Rights Reserved.      
-+ *       
-+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY      
-+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM      
-+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS      
-+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
-+ */
-+
-+#ifndef	_SBPCIE_H
-+#define	_SBPCIE_H
-+
-+/* cpp contortions to concatenate w/arg prescan */
-+#ifndef PAD
-+#define	_PADLINE(line)	pad ## line
-+#define	_XSTR(line)	_PADLINE(line)
-+#define	PAD		_XSTR(__LINE__)
-+#endif
-+
-+/* PCIE Enumeration space offsets*/
-+#define  PCIE_CORE_CONFIG_OFFSET	0x0
-+#define  PCIE_FUNC0_CONFIG_OFFSET	0x400
-+#define  PCIE_FUNC1_CONFIG_OFFSET	0x500
-+#define  PCIE_FUNC2_CONFIG_OFFSET	0x600
-+#define  PCIE_FUNC3_CONFIG_OFFSET	0x700
-+#define  PCIE_SPROM_SHADOW_OFFSET	0x800
-+#define  PCIE_SBCONFIG_OFFSET		0xE00	
-+
-+/* PCIE Bar0 Address Mapping. Each function maps 16KB config space */
-+#define PCIE_BAR0_WINMAPCORE_OFFSET	0x0
-+#define PCIE_BAR0_EXTSPROM_OFFSET	0x1000
-+#define PCIE_BAR0_PCIECORE_OFFSET	0x2000
-+#define PCIE_BAR0_CCCOREREG_OFFSET	0x3000
-+
-+/* SB side: PCIE core and host control registers */
-+typedef struct sbpcieregs {
++/* SB side: PCIE core and host control registers */
++typedef struct sbpcieregs {
 +
 +	uint32 PAD[3];
 +	uint32 biststatus;	 /* bist Status: 0x00C*/
@@ -5398,12 +5157,12 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbpcie.h linux-2.4.32-brcm/arc
 +#define SERDES_RX_CDRBW			7	/* CDR BW */
 +
 +#endif	/* _SBPCIE_H */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbpci.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbpci.h
---- linux-2.4.32/arch/mips/bcm947xx/include/sbpci.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbpci.h	2005-12-16 23:39:10.936836250 +0100
-@@ -0,0 +1,122 @@
+diff -Naur linux.old/arch/mips/bcm947xx/include/sbpcmcia.h linux.dev/arch/mips/bcm947xx/include/sbpcmcia.h
+--- linux.old/arch/mips/bcm947xx/include/sbpcmcia.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/sbpcmcia.h	2006-04-06 15:34:14.000000000 +0200
+@@ -0,0 +1,146 @@
 +/*
-+ * BCM47XX Sonics SiliconBackplane PCI core hardware definitions.
++ * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
 + *
 + * $Id$
 + * Copyright 2005, Broadcom Corporation      
@@ -5415,145 +5174,19 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbpci.h linux-2.4.32-brcm/arch
 + * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
 + */
 +
-+#ifndef	_SBPCI_H
-+#define	_SBPCI_H
++#ifndef	_SBPCMCIA_H
++#define	_SBPCMCIA_H
 +
-+/* cpp contortions to concatenate w/arg prescan */
-+#ifndef PAD
-+#define	_PADLINE(line)	pad ## line
-+#define	_XSTR(line)	_PADLINE(line)
-+#define	PAD		_XSTR(__LINE__)
-+#endif
 +
-+/* Sonics side: PCI core and host control registers */
-+typedef struct sbpciregs {
-+	uint32 control;		/* PCI control */
-+	uint32 PAD[3];
-+	uint32 arbcontrol;	/* PCI arbiter control */
-+	uint32 PAD[3];
-+	uint32 intstatus;	/* Interrupt status */
-+	uint32 intmask;		/* Interrupt mask */
-+	uint32 sbtopcimailbox;	/* Sonics to PCI mailbox */
-+	uint32 PAD[9];
-+	uint32 bcastaddr;	/* Sonics broadcast address */
-+	uint32 bcastdata;	/* Sonics broadcast data */
-+	uint32 PAD[2];
-+	uint32 gpioin;		/* ro: gpio input (>=rev2) */
-+	uint32 gpioout;		/* rw: gpio output (>=rev2) */
-+	uint32 gpioouten;	/* rw: gpio output enable (>= rev2) */
-+	uint32 gpiocontrol;	/* rw: gpio control (>= rev2) */
-+	uint32 PAD[36];
-+	uint32 sbtopci0;	/* Sonics to PCI translation 0 */
-+	uint32 sbtopci1;	/* Sonics to PCI translation 1 */
-+	uint32 sbtopci2;	/* Sonics to PCI translation 2 */
-+	uint32 PAD[445];
-+	uint16 sprom[36];	/* SPROM shadow Area */
-+	uint32 PAD[46];
-+} sbpciregs_t;
++/* All the addresses that are offsets in attribute space are divided
++ * by two to account for the fact that odd bytes are invalid in
++ * attribute space and our read/write routines make the space appear
++ * as if they didn't exist. Still we want to show the original numbers
++ * as documented in the hnd_pcmcia core manual.
++ */
 +
-+/* PCI control */
-+#define PCI_RST_OE	0x01	/* When set, drives PCI_RESET out to pin */
-+#define PCI_RST		0x02	/* Value driven out to pin */
-+#define PCI_CLK_OE	0x04	/* When set, drives clock as gated by PCI_CLK out to pin */
-+#define PCI_CLK		0x08	/* Gate for clock driven out to pin */	
-+
-+/* PCI arbiter control */
-+#define PCI_INT_ARB	0x01	/* When set, use an internal arbiter */
-+#define PCI_EXT_ARB	0x02	/* When set, use an external arbiter */
-+#define PCI_PARKID_MASK	0x06	/* Selects which agent is parked on an idle bus */
-+#define PCI_PARKID_SHIFT   1
-+#define PCI_PARKID_LAST	   0	/* Last requestor */
-+#define PCI_PARKID_4710	   1	/* 4710 */
-+#define PCI_PARKID_EXTREQ0 2	/* External requestor 0 */
-+#define PCI_PARKID_EXTREQ1 3	/* External requestor 1 */
-+
-+/* Interrupt status/mask */
-+#define PCI_INTA	0x01	/* PCI INTA# is asserted */
-+#define PCI_INTB	0x02	/* PCI INTB# is asserted */
-+#define PCI_SERR	0x04	/* PCI SERR# has been asserted (write one to clear) */
-+#define PCI_PERR	0x08	/* PCI PERR# has been asserted (write one to clear) */
-+#define PCI_PME		0x10	/* PCI PME# is asserted */
-+
-+/* (General) PCI/SB mailbox interrupts, two bits per pci function */
-+#define	MAILBOX_F0_0	0x100	/* function 0, int 0 */
-+#define	MAILBOX_F0_1	0x200	/* function 0, int 1 */
-+#define	MAILBOX_F1_0	0x400	/* function 1, int 0 */
-+#define	MAILBOX_F1_1	0x800	/* function 1, int 1 */
-+#define	MAILBOX_F2_0	0x1000	/* function 2, int 0 */
-+#define	MAILBOX_F2_1	0x2000	/* function 2, int 1 */
-+#define	MAILBOX_F3_0	0x4000	/* function 3, int 0 */
-+#define	MAILBOX_F3_1	0x8000	/* function 3, int 1 */
-+
-+/* Sonics broadcast address */
-+#define BCAST_ADDR_MASK	0xff	/* Broadcast register address */
-+
-+/* Sonics to PCI translation types */
-+#define SBTOPCI0_MASK	0xfc000000
-+#define SBTOPCI1_MASK	0xfc000000
-+#define SBTOPCI2_MASK	0xc0000000
-+#define SBTOPCI_MEM	0
-+#define SBTOPCI_IO	1
-+#define SBTOPCI_CFG0	2
-+#define SBTOPCI_CFG1	3
-+#define	SBTOPCI_PREF	0x4		/* prefetch enable */
-+#define	SBTOPCI_BURST	0x8		/* burst enable */
-+#define	SBTOPCI_RC_MASK		0x30	/* read command (>= rev11) */
-+#define	SBTOPCI_RC_READ		0x00	/* memory read */
-+#define	SBTOPCI_RC_READLINE	0x10	/* memory read line */
-+#define	SBTOPCI_RC_READMULTI	0x20	/* memory read multiple */
-+
-+/* PCI core index in SROM shadow area */
-+#define SRSH_PI_OFFSET	0	/* first word */
-+#define SRSH_PI_MASK	0xf000	/* bit 15:12 */
-+#define SRSH_PI_SHIFT	12	/* bit 15:12 */
-+
-+/* PCI side: Reserved PCI configuration registers (see pcicfg.h) */
-+#define cap_list	rsvd_a[0]
-+#define bar0_window	dev_dep[0x80 - 0x40]
-+#define bar1_window	dev_dep[0x84 - 0x40]
-+#define sprom_control	dev_dep[0x88 - 0x40]
-+
-+#ifndef _LANGUAGE_ASSEMBLY
-+
-+extern int sbpci_read_config(sb_t *sbh, uint bus, uint dev, uint func, uint off, void *buf, int len);
-+extern int sbpci_write_config(sb_t *sbh, uint bus, uint dev, uint func, uint off, void *buf, int len);
-+extern void sbpci_ban(uint16 core);
-+extern int sbpci_init(sb_t *sbh);
-+extern void sbpci_check(sb_t *sbh);
-+
-+#endif /* !_LANGUAGE_ASSEMBLY */
-+
-+#endif	/* _SBPCI_H */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbpcmcia.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbpcmcia.h
---- linux-2.4.32/arch/mips/bcm947xx/include/sbpcmcia.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbpcmcia.h	2005-12-16 23:39:10.936836250 +0100
-@@ -0,0 +1,146 @@
-+/*
-+ * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
-+ *
-+ * $Id$
-+ * Copyright 2005, Broadcom Corporation      
-+ * All Rights Reserved.      
-+ *       
-+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY      
-+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM      
-+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS      
-+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
-+ */
-+
-+#ifndef	_SBPCMCIA_H
-+#define	_SBPCMCIA_H
-+
-+
-+/* All the addresses that are offsets in attribute space are divided
-+ * by two to account for the fact that odd bytes are invalid in
-+ * attribute space and our read/write routines make the space appear
-+ * as if they didn't exist. Still we want to show the original numbers
-+ * as documented in the hnd_pcmcia core manual.
-+ */
-+
-+/* PCMCIA Function Configuration Registers */
-+#define	PCMCIA_FCR		(0x700 / 2)
++/* PCMCIA Function Configuration Registers */
++#define	PCMCIA_FCR		(0x700 / 2)
 +
 +#define	FCR0_OFF		0
 +#define	FCR1_OFF		(0x40 / 2)
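A quick sanity check on the halved offsets (editorial illustration, not part of the patch; it uses only the two constants defined above, and the function name is made up): doubling the stored values recovers the addresses as documented in the hnd_pcmcia manual.

/* Illustrative only: 2 * (0x700/2 + 0x40/2) gives back the documented
 * attribute-space base of FCR1, 0x740. ASSERT is the same macro the rest
 * of this code base uses. */
static void pcmcia_offset_example(void)
{
	ASSERT(2 * (PCMCIA_FCR + FCR1_OFF) == 0x740);
}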
@@ -5674,9 +5307,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbpcmcia.h linux-2.4.32-brcm/a
 +#define SBTMH_INT_STATUS	0x40000		/* sb interrupt status */
 +
 +#endif	/* _SBPCMCIA_H */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbsdram.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbsdram.h
---- linux-2.4.32/arch/mips/bcm947xx/include/sbsdram.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbsdram.h	2005-12-16 23:39:10.936836250 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/sbsdram.h linux.dev/arch/mips/bcm947xx/include/sbsdram.h
+--- linux.old/arch/mips/bcm947xx/include/sbsdram.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/sbsdram.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,75 @@
 +/*
 + * BCM47XX Sonics SiliconBackplane SDRAM controller core hardware definitions.
@@ -5753,9 +5386,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbsdram.h linux-2.4.32-brcm/ar
 +#define MEM8MX16X2	0xc29	/* 32 MB */
 +
 +#endif	/* _SBSDRAM_H */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbsocram.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbsocram.h
---- linux-2.4.32/arch/mips/bcm947xx/include/sbsocram.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbsocram.h	2005-12-16 23:39:10.936836250 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/sbsocram.h linux.dev/arch/mips/bcm947xx/include/sbsocram.h
+--- linux.old/arch/mips/bcm947xx/include/sbsocram.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/sbsocram.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,37 @@
 +/*
 + * BCM47XX Sonics SiliconBackplane embedded ram core
@@ -5794,9 +5427,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbsocram.h linux-2.4.32-brcm/a
 +#define SOCRAM_MEMSIZE_BASESHIFT 16
 +
 +#endif	/* _SBSOCRAM_H */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbutils.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbutils.h
---- linux-2.4.32/arch/mips/bcm947xx/include/sbutils.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/sbutils.h	2005-12-16 23:39:10.936836250 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/sbutils.h linux.dev/arch/mips/bcm947xx/include/sbutils.h
+--- linux.old/arch/mips/bcm947xx/include/sbutils.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/sbutils.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,140 @@
 +/*
 + * Misc utility routines for accessing chip-specific features
@@ -5938,9 +5571,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sbutils.h linux-2.4.32-brcm/ar
 +#define SB_DEVPATH_BUFSZ	16	/* min buffer size in bytes */
 +
 +#endif	/* _sbutils_h_ */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sflash.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/sflash.h
---- linux-2.4.32/arch/mips/bcm947xx/include/sflash.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/sflash.h	2005-12-16 23:39:10.936836250 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/sflash.h linux.dev/arch/mips/bcm947xx/include/sflash.h
+--- linux.old/arch/mips/bcm947xx/include/sflash.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/sflash.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,36 @@
 +/*
 + * Broadcom SiliconBackplane chipcommon serial flash interface
@@ -5978,9 +5611,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/sflash.h linux-2.4.32-brcm/arc
 +extern struct sflash * sflash_init(chipcregs_t *cc);
 +
 +#endif /* _sflash_h_ */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/trxhdr.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/trxhdr.h
---- linux-2.4.32/arch/mips/bcm947xx/include/trxhdr.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/trxhdr.h	2005-12-16 23:39:10.940836500 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/trxhdr.h linux.dev/arch/mips/bcm947xx/include/trxhdr.h
+--- linux.old/arch/mips/bcm947xx/include/trxhdr.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/trxhdr.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,33 @@
 +/*
 + * TRX image file header format.
@@ -6015,9 +5648,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/trxhdr.h linux-2.4.32-brcm/arc
 +
 +/* Compatibility */
 +typedef struct trx_header TRXHDR, *PTRXHDR;
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/typedefs.h linux-2.4.32-brcm/arch/mips/bcm947xx/include/typedefs.h
---- linux-2.4.32/arch/mips/bcm947xx/include/typedefs.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/include/typedefs.h	2005-12-16 23:39:10.940836500 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/include/typedefs.h linux.dev/arch/mips/bcm947xx/include/typedefs.h
+--- linux.old/arch/mips/bcm947xx/include/typedefs.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/include/typedefs.h	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,326 @@
 +/*
 + * Copyright 2005, Broadcom Corporation      
@@ -6345,28 +5978,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/include/typedefs.h linux-2.4.32-brcm/a
 +#endif /* USE_TYPEDEF_DEFAULTS */
 +
 +#endif /* _TYPEDEFS_H_ */
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/Makefile linux-2.4.32-brcm/arch/mips/bcm947xx/Makefile
---- linux-2.4.32/arch/mips/bcm947xx/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/Makefile	2005-12-19 01:56:51.733868750 +0100
-@@ -0,0 +1,15 @@
-+#
-+# Makefile for the BCM947xx specific kernel interface routines
-+# under Linux.
-+#
-+
-+EXTRA_CFLAGS+=-I$(TOPDIR)/arch/mips/bcm947xx/include -DBCMDRIVER
-+
-+O_TARGET        := bcm947xx.o
-+
-+export-objs     := nvram_linux.o setup.o
-+obj-y		:= prom.o setup.o time.o sbmips.o gpio.o
-+obj-y		+= nvram.o nvram_linux.o sflash.o cfe_env.o
-+obj-$(CONFIG_PCI) += sbpci.o pcibios.o
-+
-+include $(TOPDIR)/Rules.make
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/nvram.c linux-2.4.32-brcm/arch/mips/bcm947xx/nvram.c
---- linux-2.4.32/arch/mips/bcm947xx/nvram.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/nvram.c	2005-12-19 01:05:00.079582750 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/nvram.c linux.dev/arch/mips/bcm947xx/nvram.c
+--- linux.old/arch/mips/bcm947xx/nvram.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/nvram.c	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,320 @@
 +/*
 + * NVRAM variable manipulation (common)
@@ -6688,9 +6302,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/nvram.c linux-2.4.32-brcm/arch/mips/bc
 +{
 +	BCMINIT(nvram_free)();
 +}
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/nvram_linux.c linux-2.4.32-brcm/arch/mips/bcm947xx/nvram_linux.c
---- linux-2.4.32/arch/mips/bcm947xx/nvram_linux.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/nvram_linux.c	2005-12-19 01:09:59.782313000 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/nvram_linux.c linux.dev/arch/mips/bcm947xx/nvram_linux.c
+--- linux.old/arch/mips/bcm947xx/nvram_linux.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/nvram_linux.c	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,653 @@
 +/*
 + * NVRAM variable manipulation (Linux kernel half)
@@ -7345,9 +6959,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/nvram_linux.c linux-2.4.32-brcm/arch/m
 +
 +module_init(dev_nvram_init);
 +module_exit(dev_nvram_exit);
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/pcibios.c linux-2.4.32-brcm/arch/mips/bcm947xx/pcibios.c
---- linux-2.4.32/arch/mips/bcm947xx/pcibios.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/pcibios.c	2005-12-16 23:39:10.944836750 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/pcibios.c linux.dev/arch/mips/bcm947xx/pcibios.c
+--- linux.old/arch/mips/bcm947xx/pcibios.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/pcibios.c	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,355 @@
 +/*
 + * Low-Level PCI and SB support for BCM47xx (Linux support code)
@@ -7704,9 +7318,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/pcibios.c linux-2.4.32-brcm/arch/mips/
 +	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
 +}
 +
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/prom.c linux-2.4.32-brcm/arch/mips/bcm947xx/prom.c
---- linux-2.4.32/arch/mips/bcm947xx/prom.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/prom.c	2005-12-16 23:39:10.944836750 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/prom.c linux.dev/arch/mips/bcm947xx/prom.c
+--- linux.old/arch/mips/bcm947xx/prom.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/prom.c	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,41 @@
 +/*
 + * Early initialization code for BCM94710 boards
@@ -7749,9 +7363,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/prom.c linux-2.4.32-brcm/arch/mips/bcm
 +prom_free_prom_memory(void)
 +{
 +}
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/sbmips.c linux-2.4.32-brcm/arch/mips/bcm947xx/sbmips.c
---- linux-2.4.32/arch/mips/bcm947xx/sbmips.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/sbmips.c	2005-12-16 23:39:10.944836750 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/sbmips.c linux.dev/arch/mips/bcm947xx/sbmips.c
+--- linux.old/arch/mips/bcm947xx/sbmips.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/sbmips.c	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,1038 @@
 +/*
 + * BCM47XX Sonics SiliconBackplane MIPS core routines
@@ -8791,9 +8405,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/sbmips.c linux-2.4.32-brcm/arch/mips/b
 +	return ret;
 +}
 +
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/sbpci.c linux-2.4.32-brcm/arch/mips/bcm947xx/sbpci.c
---- linux-2.4.32/arch/mips/bcm947xx/sbpci.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/sbpci.c	2005-12-16 23:39:10.948837000 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/sbpci.c linux.dev/arch/mips/bcm947xx/sbpci.c
+--- linux.old/arch/mips/bcm947xx/sbpci.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/sbpci.c	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,588 @@
 +/*
 + * Low-Level PCI and SB support for BCM47xx
@@ -9383,9 +8997,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/sbpci.c linux-2.4.32-brcm/arch/mips/bc
 +	sb_setcoreidx(sbh, coreidx);
 +}
 +
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/setup.c linux-2.4.32-brcm/arch/mips/bcm947xx/setup.c
---- linux-2.4.32/arch/mips/bcm947xx/setup.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/setup.c	2005-12-20 00:29:40.187416500 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/setup.c linux.dev/arch/mips/bcm947xx/setup.c
+--- linux.old/arch/mips/bcm947xx/setup.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/setup.c	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,234 @@
 +/*
 + *  Generic setup routines for Broadcom MIPS boards
@@ -9621,9 +9235,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/setup.c linux-2.4.32-brcm/arch/mips/bc
 +}
 +
 +EXPORT_SYMBOL(bcm947xx_sbh);
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/sflash.c linux-2.4.32-brcm/arch/mips/bcm947xx/sflash.c
---- linux-2.4.32/arch/mips/bcm947xx/sflash.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/sflash.c	2005-12-16 23:39:10.948837000 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/sflash.c linux.dev/arch/mips/bcm947xx/sflash.c
+--- linux.old/arch/mips/bcm947xx/sflash.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/sflash.c	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,418 @@
 +/*
 + * Broadcom SiliconBackplane chipcommon serial flash interface
@@ -10043,9 +9657,9 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/sflash.c linux-2.4.32-brcm/arch/mips/b
 +	return ret;
 +}
 +
-diff -Nur linux-2.4.32/arch/mips/bcm947xx/time.c linux-2.4.32-brcm/arch/mips/bcm947xx/time.c
---- linux-2.4.32/arch/mips/bcm947xx/time.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/bcm947xx/time.c	2005-12-16 23:39:10.948837000 +0100
+diff -Naur linux.old/arch/mips/bcm947xx/time.c linux.dev/arch/mips/bcm947xx/time.c
+--- linux.old/arch/mips/bcm947xx/time.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/arch/mips/bcm947xx/time.c	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,118 @@
 +/*
 + * Copyright 2004, Broadcom Corporation
@@ -10165,10 +9779,10 @@ diff -Nur linux-2.4.32/arch/mips/bcm947xx/time.c linux-2.4.32-brcm/arch/mips/bcm
 +	/* Enable the timer interrupt */
 +	setup_irq(7, &bcm947xx_timer_irqaction);
 +}
-diff -Nur linux-2.4.32/arch/mips/config-shared.in linux-2.4.32-brcm/arch/mips/config-shared.in
---- linux-2.4.32/arch/mips/config-shared.in	2005-01-19 15:09:27.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/config-shared.in	2005-12-16 23:39:11.080845250 +0100
-@@ -205,6 +205,14 @@
+diff -Naur linux.old/arch/mips/config-shared.in linux.dev/arch/mips/config-shared.in
+--- linux.old/arch/mips/config-shared.in	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/arch/mips/config-shared.in	2006-04-06 15:34:14.000000000 +0200
+@@ -208,6 +208,14 @@
     fi
     define_bool CONFIG_MIPS_RTC y
  fi
@@ -10183,7 +9797,7 @@ diff -Nur linux-2.4.32/arch/mips/config-shared.in linux-2.4.32-brcm/arch/mips/co
  bool 'Support for SNI RM200 PCI' CONFIG_SNI_RM200_PCI
  bool 'Support for TANBAC TB0226 (Mbase)' CONFIG_TANBAC_TB0226
  bool 'Support for TANBAC TB0229 (VR4131DIMM)' CONFIG_TANBAC_TB0229
-@@ -226,6 +234,11 @@
+@@ -229,6 +237,11 @@
  define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n
  
  #
@@ -10195,7 +9809,7 @@ diff -Nur linux-2.4.32/arch/mips/config-shared.in linux-2.4.32-brcm/arch/mips/co
  # Select some configuration options automatically based on user selections.
  #
  if [ "$CONFIG_ACER_PICA_61" = "y" ]; then
-@@ -533,6 +546,13 @@
+@@ -554,6 +567,13 @@
     define_bool CONFIG_SWAP_IO_SPACE_L y
     define_bool CONFIG_BOOT_ELF32 y
  fi
@@ -10209,7 +9823,7 @@ diff -Nur linux-2.4.32/arch/mips/config-shared.in linux-2.4.32-brcm/arch/mips/co
  if [ "$CONFIG_SNI_RM200_PCI" = "y" ]; then
     define_bool CONFIG_ARC32 y
     define_bool CONFIG_ARC_MEMORY y
-@@ -1011,7 +1031,11 @@
+@@ -1042,7 +1062,11 @@
  
  bool 'Are you using a crosscompiler' CONFIG_CROSSCOMPILE
  bool 'Enable run-time debugging' CONFIG_RUNTIME_DEBUG
@@ -10222,10 +9836,10 @@ diff -Nur linux-2.4.32/arch/mips/config-shared.in linux-2.4.32-brcm/arch/mips/co
  dep_bool '  Console output to GDB' CONFIG_GDB_CONSOLE $CONFIG_KGDB
  if [ "$CONFIG_KGDB" = "y" ]; then
     define_bool CONFIG_DEBUG_INFO y
-diff -Nur linux-2.4.32/arch/mips/kernel/cpu-probe.c linux-2.4.32-brcm/arch/mips/kernel/cpu-probe.c
---- linux-2.4.32/arch/mips/kernel/cpu-probe.c	2005-01-19 15:09:29.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/kernel/cpu-probe.c	2005-12-16 23:39:11.084845500 +0100
-@@ -174,7 +174,7 @@
+diff -Naur linux.old/arch/mips/kernel/cpu-probe.c linux.dev/arch/mips/kernel/cpu-probe.c
+--- linux.old/arch/mips/kernel/cpu-probe.c	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/arch/mips/kernel/cpu-probe.c	2006-04-06 15:34:14.000000000 +0200
+@@ -162,7 +162,7 @@
  
  static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
  {
@@ -10234,7 +9848,7 @@ diff -Nur linux-2.4.32/arch/mips/kernel/cpu-probe.c linux-2.4.32-brcm/arch/mips/
  	case PRID_IMP_R2000:
  		c->cputype = CPU_R2000;
  		c->isa_level = MIPS_CPU_ISA_I;
-@@ -184,7 +184,7 @@
+@@ -172,7 +172,7 @@
  		c->tlbsize = 64;
  		break;
  	case PRID_IMP_R3000:
@@ -10243,7 +9857,7 @@ diff -Nur linux-2.4.32/arch/mips/kernel/cpu-probe.c linux-2.4.32-brcm/arch/mips/
  			if (cpu_has_confreg())
  				c->cputype = CPU_R3081E;
  			else
-@@ -199,12 +199,12 @@
+@@ -187,12 +187,12 @@
  		break;
  	case PRID_IMP_R4000:
  		if (read_c0_config() & CONF_SC) {
@@ -10258,7 +9872,7 @@ diff -Nur linux-2.4.32/arch/mips/kernel/cpu-probe.c linux-2.4.32-brcm/arch/mips/
  				c->cputype = CPU_R4400SC;
  			else
  				c->cputype = CPU_R4000SC;
-@@ -450,7 +450,7 @@
+@@ -438,7 +438,7 @@
  static inline void cpu_probe_mips(struct cpuinfo_mips *c)
  {
  	decode_config1(c);
@@ -10267,7 +9881,7 @@ diff -Nur linux-2.4.32/arch/mips/kernel/cpu-probe.c linux-2.4.32-brcm/arch/mips/
  	case PRID_IMP_4KC:
  		c->cputype = CPU_4KC;
  		c->isa_level = MIPS_CPU_ISA_M32;
-@@ -491,10 +491,10 @@
+@@ -479,10 +479,10 @@
  {
  	decode_config1(c);
  	c->options |= MIPS_CPU_PREFETCH;
@@ -10280,7 +9894,7 @@ diff -Nur linux-2.4.32/arch/mips/kernel/cpu-probe.c linux-2.4.32-brcm/arch/mips/
  		case 0:
   			c->cputype = CPU_AU1000;
  			break;
-@@ -522,10 +522,34 @@
+@@ -510,10 +510,34 @@
  	}
  }
  
@@ -10316,7 +9930,7 @@ diff -Nur linux-2.4.32/arch/mips/kernel/cpu-probe.c linux-2.4.32-brcm/arch/mips/
  	case PRID_IMP_SB1:
  		c->cputype = CPU_SB1;
  		c->isa_level = MIPS_CPU_ISA_M64;
-@@ -547,7 +571,7 @@
+@@ -535,7 +559,7 @@
  static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c)
  {
  	decode_config1(c);
@@ -10325,7 +9939,7 @@ diff -Nur linux-2.4.32/arch/mips/kernel/cpu-probe.c linux-2.4.32-brcm/arch/mips/
  	case PRID_IMP_SR71000:
  		c->cputype = CPU_SR71000;
  		c->isa_level = MIPS_CPU_ISA_M64;
-@@ -572,7 +596,7 @@
+@@ -560,7 +584,7 @@
  	c->cputype	= CPU_UNKNOWN;
  
  	c->processor_id = read_c0_prid();
@@ -10334,7 +9948,7 @@ diff -Nur linux-2.4.32/arch/mips/kernel/cpu-probe.c linux-2.4.32-brcm/arch/mips/
  
  	case PRID_COMP_LEGACY:
  		cpu_probe_legacy(c);
-@@ -583,6 +607,9 @@
+@@ -571,6 +595,9 @@
  	case PRID_COMP_ALCHEMY:
  		cpu_probe_alchemy(c);
  		break;
@@ -10344,9 +9958,9 @@ diff -Nur linux-2.4.32/arch/mips/kernel/cpu-probe.c linux-2.4.32-brcm/arch/mips/
  	case PRID_COMP_SIBYTE:
  		cpu_probe_sibyte(c);
  		break;
-diff -Nur linux-2.4.32/arch/mips/kernel/head.S linux-2.4.32-brcm/arch/mips/kernel/head.S
---- linux-2.4.32/arch/mips/kernel/head.S	2005-01-19 15:09:29.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/kernel/head.S	2005-12-16 23:39:11.084845500 +0100
+diff -Naur linux.old/arch/mips/kernel/head.S linux.dev/arch/mips/kernel/head.S
+--- linux.old/arch/mips/kernel/head.S	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/arch/mips/kernel/head.S	2006-04-06 15:34:14.000000000 +0200
 @@ -28,12 +28,20 @@
  #include <asm/mipsregs.h>
  #include <asm/stackframe.h>
@@ -10369,9 +9983,9 @@ diff -Nur linux-2.4.32/arch/mips/kernel/head.S linux-2.4.32-brcm/arch/mips/kerne
  
  		/* The following two symbols are used for kernel profiling. */
  		EXPORT(stext)
-diff -Nur linux-2.4.32/arch/mips/kernel/proc.c linux-2.4.32-brcm/arch/mips/kernel/proc.c
---- linux-2.4.32/arch/mips/kernel/proc.c	2005-01-19 15:09:29.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/kernel/proc.c	2005-12-16 23:39:11.084845500 +0100
+diff -Naur linux.old/arch/mips/kernel/proc.c linux.dev/arch/mips/kernel/proc.c
+--- linux.old/arch/mips/kernel/proc.c	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/arch/mips/kernel/proc.c	2006-04-06 15:34:14.000000000 +0200
 @@ -78,9 +78,10 @@
  	[CPU_AU1550]	"Au1550",
  	[CPU_24K]	"MIPS 24K",
@@ -10384,10 +9998,10 @@ diff -Nur linux-2.4.32/arch/mips/kernel/proc.c linux-2.4.32-brcm/arch/mips/kerne
  static int show_cpuinfo(struct seq_file *m, void *v)
  {
  	unsigned int version = current_cpu_data.processor_id;
-diff -Nur linux-2.4.32/arch/mips/kernel/setup.c linux-2.4.32-brcm/arch/mips/kernel/setup.c
---- linux-2.4.32/arch/mips/kernel/setup.c	2005-01-19 15:09:29.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/kernel/setup.c	2005-12-16 23:39:11.140849000 +0100
-@@ -495,6 +495,7 @@
+diff -Naur linux.old/arch/mips/kernel/setup.c linux.dev/arch/mips/kernel/setup.c
+--- linux.old/arch/mips/kernel/setup.c	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/arch/mips/kernel/setup.c	2006-04-06 15:34:14.000000000 +0200
+@@ -493,6 +493,7 @@
  	void swarm_setup(void);
  	void hp_setup(void);
  	void au1x00_setup(void);
@@ -10395,7 +10009,7 @@ diff -Nur linux-2.4.32/arch/mips/kernel/setup.c linux-2.4.32-brcm/arch/mips/kern
  	void frame_info_init(void);
  
  	frame_info_init();
-@@ -693,6 +694,11 @@
+@@ -691,6 +692,11 @@
                  pmc_yosemite_setup();
                  break;
  #endif
@@ -10407,10 +10021,10 @@ diff -Nur linux-2.4.32/arch/mips/kernel/setup.c linux-2.4.32-brcm/arch/mips/kern
  	default:
  		panic("Unsupported architecture");
  	}
-diff -Nur linux-2.4.32/arch/mips/kernel/traps.c linux-2.4.32-brcm/arch/mips/kernel/traps.c
---- linux-2.4.32/arch/mips/kernel/traps.c	2005-01-19 15:09:29.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/kernel/traps.c	2005-12-16 23:39:11.140849000 +0100
-@@ -913,6 +913,7 @@
+diff -Naur linux.old/arch/mips/kernel/traps.c linux.dev/arch/mips/kernel/traps.c
+--- linux.old/arch/mips/kernel/traps.c	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/arch/mips/kernel/traps.c	2006-04-06 15:34:14.000000000 +0200
+@@ -920,6 +920,7 @@
  void __init trap_init(void)
  {
  	extern char except_vec1_generic;
@@ -10418,7 +10032,7 @@ diff -Nur linux-2.4.32/arch/mips/kernel/traps.c linux-2.4.32-brcm/arch/mips/kern
  	extern char except_vec3_generic, except_vec3_r4000;
  	extern char except_vec_ejtag_debug;
  	extern char except_vec4;
-@@ -922,6 +923,7 @@
+@@ -927,6 +928,7 @@
  
  	/* Copy the generic exception handler code to it's final destination. */
  	memcpy((void *)(KSEG0 + 0x80), &except_vec1_generic, 0x80);
@@ -10426,7 +10040,7 @@ diff -Nur linux-2.4.32/arch/mips/kernel/traps.c linux-2.4.32-brcm/arch/mips/kern
  
  	/*
  	 * Setup default vectors
-@@ -980,6 +982,12 @@
+@@ -985,6 +987,12 @@
  	set_except_vector(13, handle_tr);
  	set_except_vector(22, handle_mdmx);
  
@@ -10439,41 +10053,10 @@ diff -Nur linux-2.4.32/arch/mips/kernel/traps.c linux-2.4.32-brcm/arch/mips/kern
  	if (cpu_has_fpu && !cpu_has_nofpuex)
  		set_except_vector(15, handle_fpe);
  
-diff -Nur linux-2.4.32/arch/mips/Makefile linux-2.4.32-brcm/arch/mips/Makefile
---- linux-2.4.32/arch/mips/Makefile	2005-01-19 15:09:26.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/Makefile	2005-12-16 23:39:10.668819500 +0100
-@@ -715,6 +715,19 @@
- endif
- 
- #
-+# Broadcom BCM947XX variants
-+#
-+ifdef CONFIG_BCM947XX
-+LIBS		+= arch/mips/bcm947xx/generic/brcm.o arch/mips/bcm947xx/bcm947xx.o 
-+SUBDIRS		+= arch/mips/bcm947xx/generic arch/mips/bcm947xx 
-+LOADADDR	:= 0x80001000
-+
-+zImage: vmlinux
-+	$(MAKE) -C arch/$(ARCH)/bcm947xx/compressed
-+export LOADADDR
-+endif
-+
-+#
- # Choosing incompatible machines durings configuration will result in
- # error messages during linking.  Select a default linkscript if
- # none has been choosen above.
-@@ -767,6 +780,7 @@
- 	$(MAKE) -C arch/$(ARCH)/tools clean
- 	$(MAKE) -C arch/mips/baget clean
- 	$(MAKE) -C arch/mips/lasat clean
-+	$(MAKE) -C arch/mips/bcm947xx/compressed clean
- 
- archmrproper:
- 	@$(MAKEBOOT) mrproper
-diff -Nur linux-2.4.32/arch/mips/mm/c-r4k.c linux-2.4.32-brcm/arch/mips/mm/c-r4k.c
---- linux-2.4.32/arch/mips/mm/c-r4k.c	2005-01-19 15:09:29.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/mm/c-r4k.c	2005-12-16 23:39:11.144849250 +0100
-@@ -1114,3 +1114,47 @@
+diff -Naur linux.old/arch/mips/mm/c-r4k.c linux.dev/arch/mips/mm/c-r4k.c
+--- linux.old/arch/mips/mm/c-r4k.c	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/arch/mips/mm/c-r4k.c	2006-04-06 15:34:15.000000000 +0200
+@@ -1166,3 +1166,47 @@
  	build_clear_page();
  	build_copy_page();
  }
@@ -10521,9 +10104,9 @@ diff -Nur linux-2.4.32/arch/mips/mm/c-r4k.c linux-2.4.32-brcm/arch/mips/mm/c-r4k
 +}
 +
 +
-diff -Nur linux-2.4.32/arch/mips/pci/Makefile linux-2.4.32-brcm/arch/mips/pci/Makefile
---- linux-2.4.32/arch/mips/pci/Makefile	2005-01-19 15:09:29.000000000 +0100
-+++ linux-2.4.32-brcm/arch/mips/pci/Makefile	2005-12-16 23:39:11.144849250 +0100
+diff -Naur linux.old/arch/mips/pci/Makefile linux.dev/arch/mips/pci/Makefile
+--- linux.old/arch/mips/pci/Makefile	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/arch/mips/pci/Makefile	2006-04-06 15:34:14.000000000 +0200
 @@ -13,7 +13,9 @@
  obj-$(CONFIG_MIPS_MSC)		+= ops-msc.o
  obj-$(CONFIG_MIPS_NILE4)	+= ops-nile4.o
@@ -10534,10 +10117,10 @@ diff -Nur linux-2.4.32/arch/mips/pci/Makefile linux-2.4.32-brcm/arch/mips/pci/Ma
  obj-$(CONFIG_PCI_AUTO)		+= pci_auto.o
  
  include $(TOPDIR)/Rules.make
-diff -Nur linux-2.4.32/drivers/char/serial.c linux-2.4.32-brcm/drivers/char/serial.c
---- linux-2.4.32/drivers/char/serial.c	2005-11-16 20:12:54.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/char/serial.c	2005-12-16 23:39:11.200852750 +0100
-@@ -422,6 +422,10 @@
+diff -Naur linux.old/drivers/char/serial.c linux.dev/drivers/char/serial.c
+--- linux.old/drivers/char/serial.c	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/drivers/char/serial.c	2006-04-06 15:34:14.000000000 +0200
+@@ -444,6 +444,10 @@
  		return inb(info->port+1);
  #endif
  	case SERIAL_IO_MEM:
@@ -10548,7 +10131,7 @@ diff -Nur linux-2.4.32/drivers/char/serial.c linux-2.4.32-brcm/drivers/char/seri
  		return readb((unsigned long) info->iomem_base +
  			     (offset<<info->iomem_reg_shift));
  	default:
-@@ -442,6 +446,9 @@
+@@ -464,6 +468,9 @@
  	case SERIAL_IO_MEM:
  		writeb(value, (unsigned long) info->iomem_base +
  			      (offset<<info->iomem_reg_shift));
@@ -10558,7 +10141,7 @@ diff -Nur linux-2.4.32/drivers/char/serial.c linux-2.4.32-brcm/drivers/char/seri
  		break;
  	default:
  		outb(value, info->port+offset);
-@@ -1704,7 +1711,7 @@
+@@ -1728,7 +1735,7 @@
  			/* Special case since 134 is really 134.5 */
  			quot = (2*baud_base / 269);
  		else if (baud)
@@ -10567,7 +10150,7 @@ diff -Nur linux-2.4.32/drivers/char/serial.c linux-2.4.32-brcm/drivers/char/seri
  	}
  	/* If the quotient is zero refuse the change */
  	if (!quot && old_termios) {
-@@ -1721,12 +1728,12 @@
+@@ -1745,12 +1752,12 @@
  				/* Special case since 134 is really 134.5 */
  				quot = (2*baud_base / 269);
  			else if (baud)
@@ -10582,7 +10165,7 @@ diff -Nur linux-2.4.32/drivers/char/serial.c linux-2.4.32-brcm/drivers/char/seri
  	/*
  	 * Work around a bug in the Oxford Semiconductor 952 rev B
  	 * chip which causes it to seriously miscalculate baud rates
-@@ -5982,6 +5989,13 @@
+@@ -5994,6 +6001,13 @@
  	 *	Divisor, bytesize and parity
  	 */
  	state = rs_table + co->index;
@@ -10596,7 +10179,7 @@ diff -Nur linux-2.4.32/drivers/char/serial.c linux-2.4.32-brcm/drivers/char/seri
  	if (doflow)
  		state->flags |= ASYNC_CONS_FLOW;
  	info = &async_sercons;
-@@ -5995,7 +6009,7 @@
+@@ -6007,7 +6021,7 @@
  	info->io_type = state->io_type;
  	info->iomem_base = state->iomem_base;
  	info->iomem_reg_shift = state->iomem_reg_shift;
@@ -10605,9 +10188,9 @@ diff -Nur linux-2.4.32/drivers/char/serial.c linux-2.4.32-brcm/drivers/char/seri
  	cval = cflag & (CSIZE | CSTOPB);
  #if defined(__powerpc__) || defined(__alpha__)
  	cval >>= 8;
-diff -Nur linux-2.4.32/drivers/net/Config.in linux-2.4.32-brcm/drivers/net/Config.in
---- linux-2.4.32/drivers/net/Config.in	2005-01-19 15:09:56.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/net/Config.in	2005-12-16 23:39:11.232854750 +0100
+diff -Naur linux.old/drivers/net/Config.in linux.dev/drivers/net/Config.in
+--- linux.old/drivers/net/Config.in	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/drivers/net/Config.in	2006-04-06 15:34:14.000000000 +0200
 @@ -2,6 +2,8 @@
  # Network device configuration
  #
@@ -10617,9 +10200,65 @@ diff -Nur linux-2.4.32/drivers/net/Config.in linux-2.4.32-brcm/drivers/net/Confi
  source drivers/net/arcnet/Config.in
  
  tristate 'Dummy net driver support' CONFIG_DUMMY
-diff -Nur linux-2.4.32/drivers/net/hnd/bcmsrom.c linux-2.4.32-brcm/drivers/net/hnd/bcmsrom.c
---- linux-2.4.32/drivers/net/hnd/bcmsrom.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/net/hnd/bcmsrom.c	2005-12-16 23:39:11.284858000 +0100
+diff -Naur linux.old/drivers/net/Makefile linux.dev/drivers/net/Makefile
+--- linux.old/drivers/net/Makefile	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/drivers/net/Makefile	2006-04-06 16:45:29.000000000 +0200
+@@ -3,6 +3,8 @@
+ # Makefile for the Linux network (ethercard) device drivers.
+ #
+ 
++EXTRA_CFLAGS := -I$(TOPDIR)/arch/mips/bcm947xx/include
++
+ obj-y           :=
+ obj-m           :=
+ obj-n           :=
+@@ -39,6 +41,9 @@
+   obj-$(CONFIG_ISDN) += slhc.o
+ endif
+ 
++subdir-$(CONFIG_HND) += hnd
++subdir-$(CONFIG_WL) += wl
++subdir-$(CONFIG_WL2) += wl2
+ subdir-$(CONFIG_NET_PCMCIA) += pcmcia
+ subdir-$(CONFIG_NET_WIRELESS) += wireless
+ subdir-$(CONFIG_TULIP) += tulip
+@@ -69,6 +74,10 @@
+ obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o
+ obj-$(CONFIG_SUNGEM) += sungem.o
+ 
++ifeq ($(CONFIG_HND),y)
++  obj-y += hnd/hnd.o
++endif
++
+ obj-$(CONFIG_MACE) += mace.o
+ obj-$(CONFIG_BMAC) += bmac.o
+ obj-$(CONFIG_GMAC) += gmac.o
+diff -Naur linux.old/drivers/net/hnd/Makefile linux.dev/drivers/net/hnd/Makefile
+--- linux.old/drivers/net/hnd/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/hnd/Makefile	2006-04-06 16:20:00.000000000 +0200
+@@ -0,0 +1,19 @@
++#
++# Makefile for the BCM47xx specific kernel interface routines
++# under Linux.
++#
++
++EXTRA_CFLAGS	+= -I$(TOPDIR)/arch/mips/bcm947xx/include -DBCMDRIVER
++
++O_TARGET	:= hnd.o
++
++HND_OBJS	:= bcmutils.o linux_osl.o sbutils.o bcmsrom.o
++
++export-objs	:= shared_ksyms.o
++obj-y		:= shared_ksyms.o $(HND_OBJS)
++obj-m           := $(O_TARGET)
++
++include $(TOPDIR)/Rules.make
++
++shared_ksyms.c: shared_ksyms.sh $(HND_OBJS)
++	sh -e $< $(HND_OBJS) > $@
+diff -Naur linux.old/drivers/net/hnd/bcmsrom.c linux.dev/drivers/net/hnd/bcmsrom.c
+--- linux.old/drivers/net/hnd/bcmsrom.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/hnd/bcmsrom.c	2006-04-06 15:34:14.000000000 +0200
 @@ -0,0 +1,938 @@
 +/*
 + *  Misc useful routines to access NIC SROM/OTP .
@@ -11559,10 +11198,10 @@ diff -Nur linux-2.4.32/drivers/net/hnd/bcmsrom.c linux-2.4.32-brcm/drivers/net/h
 +	return (rc);
 +}
 +
-diff -Nur linux-2.4.32/drivers/net/hnd/bcmutils.c linux-2.4.32-brcm/drivers/net/hnd/bcmutils.c
---- linux-2.4.32/drivers/net/hnd/bcmutils.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/net/hnd/bcmutils.c	2005-12-16 23:39:11.288858250 +0100
-@@ -0,0 +1,1081 @@
+diff -Naur linux.old/drivers/net/hnd/bcmutils.c linux.dev/drivers/net/hnd/bcmutils.c
+--- linux.old/drivers/net/hnd/bcmutils.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/hnd/bcmutils.c	2006-04-06 16:05:56.000000000 +0200
+@@ -0,0 +1,875 @@
 +/*
 + * Misc useful OS-independent routines.
 + *
@@ -11590,254 +11229,48 @@ diff -Nur linux-2.4.32/drivers/net/hnd/bcmutils.c linux-2.4.32-brcm/drivers/net/
 +#include <bcmdevs.h>
 +
 +#ifdef BCMDRIVER
-+/* copy a pkt buffer chain into a buffer */
-+uint
-+pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
-+{
-+	uint n, ret = 0;
-+
-+	if (len < 0)
-+		len = 4096;	/* "infinite" */
-+
-+	/* skip 'offset' bytes */
-+	for (; p && offset; p = PKTNEXT(osh, p)) {
-+		if (offset < (uint)PKTLEN(osh, p))
-+			break;
-+		offset -= PKTLEN(osh, p);
-+	}
-+
-+	if (!p)
-+		return 0;
-+
-+	/* copy the data */
-+	for (; p && len; p = PKTNEXT(osh, p)) {
-+		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
-+		bcopy(PKTDATA(osh, p) + offset, buf, n);
-+		buf += n;
-+		len -= n;
-+		ret += n;
-+		offset = 0;
-+	}
++unsigned char bcm_ctype[] = {
++	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 0-7 */
++	_BCM_C,_BCM_C|_BCM_S,_BCM_C|_BCM_S,_BCM_C|_BCM_S,_BCM_C|_BCM_S,_BCM_C|_BCM_S,_BCM_C,_BCM_C,		/* 8-15 */
++	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 16-23 */
++	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 24-31 */
++	_BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 32-39 */
++	_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 40-47 */
++	_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,			/* 48-55 */
++	_BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 56-63 */
++	_BCM_P,_BCM_U|_BCM_X,_BCM_U|_BCM_X,_BCM_U|_BCM_X,_BCM_U|_BCM_X,_BCM_U|_BCM_X,_BCM_U|_BCM_X,_BCM_U,	/* 64-71 */
++	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 72-79 */
++	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 80-87 */
++	_BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 88-95 */
++	_BCM_P,_BCM_L|_BCM_X,_BCM_L|_BCM_X,_BCM_L|_BCM_X,_BCM_L|_BCM_X,_BCM_L|_BCM_X,_BCM_L|_BCM_X,_BCM_L,	/* 96-103 */
++	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,			/* 104-111 */
++	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,			/* 112-119 */
++	_BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C,			/* 120-127 */
++	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,		/* 128-143 */
++	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,		/* 144-159 */
++	_BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,   /* 160-175 */
++	_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,       /* 176-191 */
++	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,       /* 192-207 */
++	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_L,       /* 208-223 */
++	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,       /* 224-239 */
++	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L        /* 240-255 */
++};
 +
-+	return ret;
++uchar
++bcm_toupper(uchar c)
++{
++	if (bcm_islower(c))
++		c -= 'a'-'A';
++	return (c);
 +}
 +
-+/* return total length of buffer chain */
-+uint
-+pkttotlen(osl_t *osh, void *p)
++ulong
++bcm_strtoul(char *cp, char **endp, uint base)
 +{
-+	uint total;
-+
-+	total = 0;
-+	for (; p; p = PKTNEXT(osh, p))
-+		total += PKTLEN(osh, p);
-+	return (total);
-+}
-+
-+void
-+pktq_init(struct pktq *q, uint maxlen, const uint8 prio_map[])
-+{
-+	q->head = q->tail = NULL;
-+	q->maxlen = maxlen;
-+	q->len = 0;
-+	if (prio_map) {
-+		q->priority = TRUE;
-+		bcopy(prio_map, q->prio_map, sizeof(q->prio_map));
-+	}
-+	else
-+		q->priority = FALSE;
-+}
-+
-+/* should always check pktq_full before calling pktenq */
-+void
-+pktenq(struct pktq *q, void *p, bool lifo)
-+{
-+	void *next, *prev;
-+
-+	/* allow 10 pkts slack */
-+	ASSERT(q->len < (q->maxlen + 10));
-+
-+	/* Queueing chains not allowed */
-+	ASSERT(PKTLINK(p) == NULL);
-+
-+	/* Queue is empty */
-+	if (q->tail == NULL) {
-+		ASSERT(q->head == NULL);
-+		q->head = q->tail = p;
-+	}
-+
-+	/* Insert at head or tail */
-+	else if (q->priority == FALSE) {
-+		/* Insert at head (LIFO) */
-+		if (lifo) {
-+			PKTSETLINK(p, q->head);
-+			q->head = p;
-+		}
-+		/* Insert at tail (FIFO) */
-+		else {
-+			ASSERT(PKTLINK(q->tail) == NULL);
-+			PKTSETLINK(q->tail, p);
-+			PKTSETLINK(p, NULL);
-+			q->tail = p;
-+		}
-+	}
-+
-+	/* Insert by priority */
-+	else {
-+		/* legal priorities 0-7 */
-+		ASSERT(PKTPRIO(p) <= MAXPRIO);
-+
-+		ASSERT(q->head);
-+		ASSERT(q->tail);
-+		/* Shortcut to insertion at tail */
-+		if (_pktq_pri(q, PKTPRIO(p)) < _pktq_pri(q, PKTPRIO(q->tail)) ||
-+		    (!lifo && _pktq_pri(q, PKTPRIO(p)) <= _pktq_pri(q, PKTPRIO(q->tail)))) {
-+			prev = q->tail;
-+			next = NULL;
-+		}
-+		/* Insert at head or in the middle */
-+		else {
-+			prev = NULL;
-+			next = q->head;
-+		}
-+		/* Walk the queue */
-+		for (; next; prev = next, next = PKTLINK(next)) {
-+			/* Priority queue invariant */
-+			ASSERT(!prev || _pktq_pri(q, PKTPRIO(prev)) >= _pktq_pri(q, PKTPRIO(next)));
-+			/* Insert at head of string of packets of same priority (LIFO) */
-+			if (lifo) {
-+				if (_pktq_pri(q, PKTPRIO(p)) >= _pktq_pri(q, PKTPRIO(next)))
-+					break;
-+			}
-+			/* Insert at tail of string of packets of same priority (FIFO) */
-+			else {
-+				if (_pktq_pri(q, PKTPRIO(p)) > _pktq_pri(q, PKTPRIO(next)))
-+					break;
-+			}
-+		}
-+		/* Insert at tail */
-+		if (next == NULL) {
-+			ASSERT(PKTLINK(q->tail) == NULL);
-+			PKTSETLINK(q->tail, p);
-+			PKTSETLINK(p, NULL);
-+			q->tail = p;
-+		}
-+		/* Insert in the middle */
-+		else if (prev) {
-+			PKTSETLINK(prev, p);
-+			PKTSETLINK(p, next);
-+		}
-+		/* Insert at head */
-+		else {
-+			PKTSETLINK(p, q->head);
-+			q->head = p;
-+		}
-+	}
-+
-+	/* List invariants after insertion */
-+	ASSERT(q->head);
-+	ASSERT(PKTLINK(q->tail) == NULL);
-+
-+	q->len++;
-+}
-+
-+/* dequeue packet at head */
-+void*
-+pktdeq(struct pktq *q)
-+{
-+	void *p;
-+
-+	if ((p = q->head)) {
-+		ASSERT(q->tail);
-+		q->head = PKTLINK(p);
-+		PKTSETLINK(p, NULL);
-+		q->len--;
-+		if (q->head == NULL)
-+			q->tail = NULL;
-+	}
-+	else {
-+		ASSERT(q->tail == NULL);
-+	}
-+
-+	return (p);
-+}
-+
-+/* dequeue packet at tail */
-+void*
-+pktdeqtail(struct pktq *q)
-+{
-+	void *p;
-+	void *next, *prev;
-+
-+	if (q->head == q->tail) {  /* last packet on queue or queue empty */
-+		p = q->head;
-+		q->head = q->tail = NULL;
-+		q->len = 0;
-+		return(p);
-+	}
-+
-+	/* start walk at head */
-+	prev = NULL;
-+	next = q->head;
-+
-+	/* Walk the queue to find prev of q->tail */
-+	for (; next; prev = next, next = PKTLINK(next)) {
-+		if (next == q->tail)
-+			break;
-+	}
-+
-+	ASSERT(prev);
-+
-+	PKTSETLINK(prev, NULL);
-+	q->tail = prev;
-+	q->len--;
-+	p = next;
-+
-+	return (p);
-+}
-+
-+unsigned char bcm_ctype[] = {
-+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 0-7 */
-+	_BCM_C,_BCM_C|_BCM_S,_BCM_C|_BCM_S,_BCM_C|_BCM_S,_BCM_C|_BCM_S,_BCM_C|_BCM_S,_BCM_C,_BCM_C,		/* 8-15 */
-+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 16-23 */
-+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 24-31 */
-+	_BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 32-39 */
-+	_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 40-47 */
-+	_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,			/* 48-55 */
-+	_BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 56-63 */
-+	_BCM_P,_BCM_U|_BCM_X,_BCM_U|_BCM_X,_BCM_U|_BCM_X,_BCM_U|_BCM_X,_BCM_U|_BCM_X,_BCM_U|_BCM_X,_BCM_U,	/* 64-71 */
-+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 72-79 */
-+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 80-87 */
-+	_BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 88-95 */
-+	_BCM_P,_BCM_L|_BCM_X,_BCM_L|_BCM_X,_BCM_L|_BCM_X,_BCM_L|_BCM_X,_BCM_L|_BCM_X,_BCM_L|_BCM_X,_BCM_L,	/* 96-103 */
-+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,			/* 104-111 */
-+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,			/* 112-119 */
-+	_BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C,			/* 120-127 */
-+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,		/* 128-143 */
-+	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,		/* 144-159 */
-+	_BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,   /* 160-175 */
-+	_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,       /* 176-191 */
-+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,       /* 192-207 */
-+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_L,       /* 208-223 */
-+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,       /* 224-239 */
-+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L        /* 240-255 */
-+};
-+
-+uchar
-+bcm_toupper(uchar c)
-+{
-+	if (bcm_islower(c))
-+		c -= 'a'-'A';
-+	return (c);
-+}
-+
-+ulong
-+bcm_strtoul(char *cp, char **endp, uint base)
-+{
-+	ulong result, value;
-+	bool minus;
-+	
-+	minus = FALSE;
++	ulong result, value;
++	bool minus;
++	
++	minus = FALSE;
 +
 +	while (bcm_isspace(*cp))
 +		cp++;
@@ -12393,2530 +11826,5655 @@ diff -Nur linux-2.4.32/drivers/net/hnd/bcmutils.c linux-2.4.32-brcm/drivers/net/
 +    return crc;
 +}
 +
-+#ifdef notdef
-+#define CLEN 	1499
-+#define CBUFSIZ 	(CLEN+4)
-+#define CNBUFS		5
++#ifdef notdef
++#define CLEN 	1499
++#define CBUFSIZ 	(CLEN+4)
++#define CNBUFS		5
++
++void testcrc32(void)
++{
++	uint j,k,l;
++	uint8 *buf;
++	uint len[CNBUFS];
++	uint32 crcr;
++	uint32 crc32tv[CNBUFS] =
++		{0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
++
++	ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
++
++	/* step through all possible alignments */
++	for (l=0;l<=4;l++) {
++		for (j=0; j<CNBUFS; j++) {
++			len[j] = CLEN;
++			for (k=0; k<len[j]; k++)
++				*(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
++		}
++
++		for (j=0; j<CNBUFS; j++) {
++			crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
++			ASSERT(crcr == crc32tv[j]);
++		}
++	}
++
++	MFREE(buf, CBUFSIZ*CNBUFS);
++	return;
++}
++#endif
++
++
++/* 
++ * Advance from the current 1-byte tag/1-byte length/variable-length value 
++ * triple, to the next, returning a pointer to the next.
++ * If the current or next TLV is invalid (does not fit in given buffer length),
++ * NULL is returned.
++ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
++ * by the TLV parameter's length if it is valid.
++ */
++bcm_tlv_t *
++bcm_next_tlv(bcm_tlv_t *elt, int *buflen)
++{
++	int len;
++
++	/* validate current elt */
++	if (!bcm_valid_tlv(elt, *buflen))
++		return NULL;
++	
++	/* advance to next elt */
++	len = elt->len;
++	elt = (bcm_tlv_t*)(elt->data + len);
++	*buflen -= (2 + len);
++	
++	/* validate next elt */
++	if (!bcm_valid_tlv(elt, *buflen))
++		return NULL;
++	
++	return elt;
++}
++
++/* 
++ * Traverse a string of 1-byte tag/1-byte length/variable-length value 
++ * triples, returning a pointer to the substring whose first element 
++ * matches tag
++ */
++bcm_tlv_t *
++bcm_parse_tlvs(void *buf, int buflen, uint key)
++{
++	bcm_tlv_t *elt;
++	int totlen;
++
++	elt = (bcm_tlv_t*)buf;
++	totlen = buflen;
++
++	/* find tagged parameter */
++	while (totlen >= 2) {
++		int len = elt->len;
++
++		/* validate remaining totlen */
++		if ((elt->id == key) && (totlen >= (len + 2)))
++			return (elt);
++
++		elt = (bcm_tlv_t*)((uint8*)elt + (len + 2));
++		totlen -= (len + 2);
++	}
++	
++	return NULL;
++}
++
++/* 
++ * Traverse a string of 1-byte tag/1-byte length/variable-length value 
++ * triples, returning a pointer to the substring whose first element 
++ * matches tag.  Stop parsing when we see an element whose ID is greater
++ * than the target key. 
++ */
++bcm_tlv_t *
++bcm_parse_ordered_tlvs(void *buf, int buflen, uint key)
++{
++	bcm_tlv_t *elt;
++	int totlen;
++
++	elt = (bcm_tlv_t*)buf;
++	totlen = buflen;
++
++	/* find tagged parameter */
++	while (totlen >= 2) {
++		uint id = elt->id;
++		int len = elt->len;
++		
++		/* Punt if we start seeing IDs > than target key */
++		if (id > key)
++			return(NULL);
++
++		/* validate remaining totlen */
++		if ((id == key) && (totlen >= (len + 2)))
++			return (elt);
++
++		elt = (bcm_tlv_t*)((uint8*)elt + (len + 2));
++		totlen -= (len + 2);
++	}
++	return NULL;
++}
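A short usage sketch for the TLV helpers above (editorial, not part of the patch): it assumes only the bcmutils.h declarations the code above already relies on, and the tag value 0x30 is an arbitrary example key.

/* Editorial sketch: look up one tag and count every element in a buffer
 * of 1-byte-tag/1-byte-length TLVs. bcm_valid_tlv() is the same bounds
 * check bcm_next_tlv() uses internally. */
static int tlv_example(void *buf, int buflen, bcm_tlv_t **match)
{
	bcm_tlv_t *elt = (bcm_tlv_t *) buf;
	int n = 0;

	/* jump straight to a given tag */
	*match = bcm_parse_tlvs(buf, buflen, 0x30);

	/* or walk the whole buffer; bcm_next_tlv() verifies that each
	 * element fits and shrinks buflen as it advances */
	if (bcm_valid_tlv(elt, buflen))
		for (; elt != NULL; elt = bcm_next_tlv(elt, &buflen))
			n++;

	return n;
}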
++/* routine to dump fields in a fielddesc structure */
++
++uint 
++bcmdumpfields(readreg_rtn read_rtn, void *arg0, void *arg1, struct fielddesc *fielddesc_array, char *buf, uint32 bufsize)
++{
++	uint  filled_len;
++	uint len;
++	struct fielddesc *cur_ptr;
++
++	filled_len = 0;
++	cur_ptr = fielddesc_array; 
++
++	while (bufsize > (filled_len + 64)) {
++		if (cur_ptr->nameandfmt == NULL)
++			break;
++		len = sprintf(buf, cur_ptr->nameandfmt, read_rtn(arg0, arg1, cur_ptr->offset));
++		buf += len;
++		filled_len += len;
++		cur_ptr++;
++	}
++	return filled_len;
++}
++
++uint
++bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
++{
++	uint len;
++
++	len = strlen(name) + 1;
++	
++	if ((len + datalen) > buflen)
++		return 0;
++
++	strcpy(buf, name);
++
++	/* append data onto the end of the name string */
++	memcpy(&buf[len], data, datalen);
++	len += datalen;
++
++	return len;
++}
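For orientation (editorial sketch, not part of the patch): the buffer built above is simply the NUL-terminated iovar name followed by the raw payload. The variable name "mpc" and the helper below are made-up examples.

/* Editorial sketch: buf ends up as 'm' 'p' 'c' '\0' followed by the four
 * payload bytes, and the return value is strlen("mpc") + 1 + sizeof(val);
 * a return of 0 means the name plus payload did not fit in buflen. */
static uint example_mkiovar(char *buf, uint buflen)
{
	uint32 val = 1;

	return bcm_mkiovar("mpc", (char *) &val, sizeof(val), buf, buflen);
}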
++
++/* Quarter dBm units to mW
++ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
++ * Table is offset so the last entry is largest mW value that fits in
++ * a uint16.
++ */
++
++#define QDBM_OFFSET 153
++#define QDBM_TABLE_LEN 40
++
++/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
++ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
++ */
++#define QDBM_TABLE_LOW_BOUND 6493
++
++/* Largest mW value that will round down to the last table entry,
++ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
++ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
++ */
++#define QDBM_TABLE_HIGH_BOUND 64938
++
++static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
++/* qdBm:        +0		+1		+2		+3		+4		+5		+6		+7	*/
++/* 153: */      6683,	7079,	7499,	7943,	8414,	8913,	9441,	10000,
++/* 161: */      10593,	11220,	11885,	12589,	13335,	14125,	14962,	15849,
++/* 169: */      16788,	17783,	18836,	19953,	21135,	22387,	23714,	25119,
++/* 177: */      26607,	28184,	29854,	31623,	33497,	35481,	37584,	39811,
++/* 185: */      42170,	44668,	47315,	50119,	53088,	56234,	59566,	63096
++};
++
++uint16
++bcm_qdbm_to_mw(uint8 qdbm)
++{
++	uint factor = 1;
++	int idx = qdbm - QDBM_OFFSET;
++	
++	if (idx >= QDBM_TABLE_LEN) {
++		/* clamp to max uint16 mW value */
++		return 0xFFFF;
++	}
++	
++	/* scale the qdBm index up to the range of the table 0-40
++	 * where an offset of 40 qdBm equals a factor of 10 mW.
++	 */
++	while (idx < 0) {
++		idx += 40;
++		factor *= 10;
++	}
++	
++	/* return the mW value scaled down to the correct factor of 10,
++	 * adding in factor/2 to get proper rounding. */
++	return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
++}
++
++uint8
++bcm_mw_to_qdbm(uint16 mw)
++{
++	uint8 qdbm;
++	int offset;
++	uint mw_uint = mw;
++	uint boundary;
++	
++	/* handle boundary case */
++	if (mw_uint <= 1)
++		return 0;
++	
++	offset = QDBM_OFFSET;
++	
++	/* move mw into the range of the table */
++	while (mw_uint < QDBM_TABLE_LOW_BOUND) {
++		mw_uint *= 10;
++		offset -= 40;
++	}
++
++	for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
++		boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] - nqdBm_to_mW_map[qdbm])/2;
++		if (mw_uint < boundary) break;
++	}
++
++	qdbm += (uint8)offset;
++
++	return(qdbm);
++}
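
For illustration only (not part of the patch): a standalone copy of the quarter-dBm lookup above, used to sanity-check one value -- 80 qdBm is 20 dBm, which should come back as 100 mW.

    #include <stdio.h>

    #define QDBM_OFFSET    153
    #define QDBM_TABLE_LEN 40

    /* same table as above: mW for qdBm values 153..192 */
    static const unsigned short mw_map[QDBM_TABLE_LEN] = {
        6683,  7079,  7499,  7943,  8414,  8913,  9441,  10000,
        10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
        16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
        26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811,
        42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
    };

    static unsigned short qdbm_to_mw(unsigned char qdbm)
    {
        unsigned int factor = 1;
        int idx = qdbm - QDBM_OFFSET;

        if (idx >= QDBM_TABLE_LEN)
            return 0xFFFF;                  /* clamp to the largest uint16 mW value */
        while (idx < 0) {                   /* every 40 qdBm below 153 is one decade of mW */
            idx += 40;
            factor *= 10;
        }
        return (mw_map[idx] + factor / 2) / factor;
    }

    int main(void)
    {
        printf("80 qdBm (20 dBm) -> %u mW\n", qdbm_to_mw(80));   /* prints 100 */
        return 0;
    }
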
+diff -Naur linux.old/drivers/net/hnd/linux_osl.c linux.dev/drivers/net/hnd/linux_osl.c
+--- linux.old/drivers/net/hnd/linux_osl.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/hnd/linux_osl.c	2006-04-06 15:34:15.000000000 +0200
+@@ -0,0 +1,708 @@
++/*
++ * Linux OS Independent Layer
++ *
++ * Copyright 2005, Broadcom Corporation
++ * All Rights Reserved.
++ * 
++ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
++ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
++ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
++ *
++ * $Id$
++ */
++
++#define LINUX_OSL
++
++#include <typedefs.h>
++#include <bcmendian.h>
++#include <linux/module.h>
++#include <linuxver.h>
++#include <osl.h>
++#include <bcmutils.h>
++#include <linux/delay.h>
++#ifdef mips
++#include <asm/paccess.h>
++#endif
++#include <pcicfg.h>
++
++#define PCI_CFG_RETRY 		10	
++
++#define OS_HANDLE_MAGIC		0x1234abcd
++#define BCM_MEM_FILENAME_LEN 	24
++
++typedef struct bcm_mem_link {
++	struct bcm_mem_link *prev;
++	struct bcm_mem_link *next;
++	uint	size;
++	int	line;
++	char	file[BCM_MEM_FILENAME_LEN];
++} bcm_mem_link_t;
++
++struct os_handle {
++	uint magic;
++	void *pdev;
++	uint malloced;
++	uint failed;
++	bcm_mem_link_t *dbgmem_list;
++};
++
++static int16 linuxbcmerrormap[] =  \
++{	0, 			/* 0 */
++	-EINVAL,		/* BCME_ERROR */
++	-EINVAL,		/* BCME_BADARG*/
++	-EINVAL,		/* BCME_BADOPTION*/
++	-EINVAL,		/* BCME_NOTUP */
++	-EINVAL,		/* BCME_NOTDOWN */
++	-EINVAL,		/* BCME_NOTAP */
++	-EINVAL,		/* BCME_NOTSTA */
++	-EINVAL,		/* BCME_BADKEYIDX */
++	-EINVAL,		/* BCME_RADIOOFF */
++	-EINVAL,		/* BCME_NOTBANDLOCKED */
++	-EINVAL, 		/* BCME_NOCLK */
++	-EINVAL, 		/* BCME_BADRATESET */
++	-EINVAL, 		/* BCME_BADBAND */
++	-E2BIG,			/* BCME_BUFTOOSHORT */
++	-E2BIG,			/* BCME_BUFTOOLONG */
++	-EBUSY, 		/* BCME_BUSY */
++	-EINVAL, 		/* BCME_NOTASSOCIATED */
++	-EINVAL, 		/* BCME_BADSSIDLEN */
++	-EINVAL, 		/* BCME_OUTOFRANGECHAN */
++	-EINVAL, 		/* BCME_BADCHAN */
++	-EFAULT, 		/* BCME_BADADDR */
++	-ENOMEM, 		/* BCME_NORESOURCE */
++	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
++	-EMSGSIZE,		/* BCME_BADLENGTH */
++	-EINVAL,		/* BCME_NOTREADY */
++	-EPERM,			/* BCME_NOTPERMITTED */
++	-ENOMEM, 		/* BCME_NOMEM */
++	-EINVAL, 		/* BCME_ASSOCIATED */
++	-ERANGE, 		/* BCME_RANGE */
++	-EINVAL 		/* BCME_NOTFOUND */
++}; 
++
++/* translate bcmerrors into linux errors*/
++int 
++osl_error(int bcmerror)
++{
++	int abs_bcmerror;
++	int array_size = ARRAYSIZE(linuxbcmerrormap); 
++	
++	abs_bcmerror = ABS(bcmerror);	
++
++	if (bcmerror > 0)
++		abs_bcmerror = 0;
++
++ 	else if (abs_bcmerror >= array_size)
++		abs_bcmerror = BCME_ERROR;
++
++	return linuxbcmerrormap[abs_bcmerror];
++}
++
++osl_t *
++osl_attach(void *pdev)
++{
++	osl_t *osh;
++
++	osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
++	ASSERT(osh);
++
++	/* 
++	 * check the cases where:
++	 * 1. an error code was added to the bcmerror table but not to the
++	 *    OS-dependent error map above
++	 * 2. an error code was added to the bcmerror table but the corresponding
++	 *    error string was forgotten (hence the dummy call to bcmerrorstr)
++	 */
++	bcmerrorstr(0);
++	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
++
++	osh->magic = OS_HANDLE_MAGIC;
++	osh->malloced = 0;
++	osh->failed = 0;
++	osh->dbgmem_list = NULL;
++	osh->pdev = pdev;
++
++	return osh;
++}
++
++void
++osl_detach(osl_t *osh)
++{
++	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC));
++	kfree(osh);
++}
++
++void*
++osl_pktget(osl_t *osh, uint len, bool send)
++{
++	struct sk_buff *skb;
++
++	if ((skb = dev_alloc_skb(len)) == NULL)
++		return (NULL);
++
++	skb_put(skb, len);
++
++	/* ensure the cookie field is cleared */ 
++	PKTSETCOOKIE(skb, NULL);
++
++	return ((void*) skb);
++}
++
++void
++osl_pktfree(void *p)
++{
++	struct sk_buff *skb, *nskb;
++
++	skb = (struct sk_buff*) p;
++
++	/* perversion: we use skb->next to chain multi-skb packets */
++	while (skb) {
++		nskb = skb->next;
++		skb->next = NULL;
++		if (skb->destructor) {
++			/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if destructor exists */
++			dev_kfree_skb_any(skb);
++		} else {
++			/* can free immediately (even in_irq()) if destructor does not exist */
++			dev_kfree_skb(skb);
++		}
++		skb = nskb;
++	}
++}
++
++uint32
++osl_pci_read_config(osl_t *osh, uint offset, uint size)
++{
++	uint val;
++	uint retry=PCI_CFG_RETRY;	 
++
++	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++
++	/* only 4byte access supported */
++	ASSERT(size == 4);
++
++	do {
++		pci_read_config_dword(osh->pdev, offset, &val);
++		if (val != 0xffffffff)
++			break;
++	} while (retry--);
++
++
++	return (val);
++}
++
++void
++osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
++{
++	uint retry=PCI_CFG_RETRY;	 
++
++	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++
++	/* only 4byte access supported */
++	ASSERT(size == 4);
++
++	do {
++		pci_write_config_dword(osh->pdev, offset, val);
++		if (offset!=PCI_BAR0_WIN)
++			break;
++		if (osl_pci_read_config(osh,offset,size) == val) 
++			break;
++	} while (retry--);
++
++}
++
++/* return bus # for the pci device pointed by osh->pdev */
++uint
++osl_pci_bus(osl_t *osh)
++{
++	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
++
++	return ((struct pci_dev *)osh->pdev)->bus->number;
++}
++
++/* return slot # for the pci device pointed by osh->pdev */
++uint
++osl_pci_slot(osl_t *osh)
++{
++	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
++
++	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
++}
++
++static void
++osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
++{
++}
++
++void
++osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
++{
++	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
++}
++
++void
++osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
++{
++	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
++}
++
++
++#ifdef BCMDBG_MEM
++
++void*
++osl_debug_malloc(osl_t *osh, uint size, int line, char* file)
++{
++	bcm_mem_link_t *p;
++	char* basename;
++
++	ASSERT(size);
++	
++	if ((p = (bcm_mem_link_t*)osl_malloc(osh, sizeof(bcm_mem_link_t) + size)) == NULL)
++		return (NULL);
++	
++	p->size = size;
++	p->line = line;
++	
++	basename = strrchr(file, '/');
++	/* skip the '/' */
++	if (basename)
++		basename++;
++
++	if (!basename)
++		basename = file;
++	
++	strncpy(p->file, basename, BCM_MEM_FILENAME_LEN);
++	p->file[BCM_MEM_FILENAME_LEN - 1] = '\0';
++
++	/* link this block */
++	p->prev = NULL;
++	p->next = osh->dbgmem_list;
++	if (p->next)
++		p->next->prev = p;
++	osh->dbgmem_list = p;
++
++	return p + 1;
++}
++
++void
++osl_debug_mfree(osl_t *osh, void *addr, uint size, int line, char* file)
++{
++	bcm_mem_link_t *p = (bcm_mem_link_t *)((int8*)addr - sizeof(bcm_mem_link_t));
++	
++	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++
++	if (p->size == 0) {
++		printk("osl_debug_mfree: double free on addr 0x%x size %d at line %d file %s\n", 
++			(uint)addr, size, line, file);
++		ASSERT(p->size);
++		return;
++	}
++
++	if (p->size != size) {
++		printk("osl_debug_mfree: dealloc size %d does not match alloc size %d on addr 0x%x at line %d file %s\n",
++		       size, p->size, (uint)addr, line, file);
++		ASSERT(p->size == size);
++		return;
++	}
++
++	/* unlink this block */
++	if (p->prev)
++		p->prev->next = p->next;
++	if (p->next)
++		p->next->prev = p->prev;
++	if (osh->dbgmem_list == p)
++		osh->dbgmem_list = p->next;
++	p->next = p->prev = NULL;
++
++	osl_mfree(osh, p, size + sizeof(bcm_mem_link_t));
++}
++
++char*
++osl_debug_memdump(osl_t *osh, char *buf, uint sz)
++{
++	bcm_mem_link_t *p;
++	char *obuf;
++	
++	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++	obuf = buf;
++
++	buf += sprintf(buf, "   Address\tSize\tFile:line\n");
++	for (p = osh->dbgmem_list; p && ((buf - obuf) < (sz - 128)); p = p->next)
++		buf += sprintf(buf, "0x%08x\t%5d\t%s:%d\n",
++			(int)p + sizeof(bcm_mem_link_t), p->size, p->file, p->line);
++
++	return (obuf);
++}
++
++#endif	/* BCMDBG_MEM */
++
++void*
++osl_malloc(osl_t *osh, uint size)
++{
++	void *addr;
++	
++	/* only ASSERT if osh is defined */
++	if (osh)
++		ASSERT(osh->magic == OS_HANDLE_MAGIC);
++
++	if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
++		if(osh)
++			osh->failed++;
++		return (NULL);
++	}
++	if (osh)
++		osh->malloced += size;
++	
++	return (addr);
++}
++
++void
++osl_mfree(osl_t *osh, void *addr, uint size)
++{
++	if (osh) {
++		ASSERT(osh->magic == OS_HANDLE_MAGIC);
++		osh->malloced -= size;
++	}
++	kfree(addr);
++}
++
++uint
++osl_malloced(osl_t *osh)
++{
++	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++	return (osh->malloced);
++}
++
++uint osl_malloc_failed(osl_t *osh)
++{
++	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++	return (osh->failed);
++}
++
++void*
++osl_dma_alloc_consistent(osl_t *osh, uint size, ulong *pap)
++{
++	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++
++	return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
++}
++
++void
++osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
++{
++	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++
++	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
++}
++
++uint
++osl_dma_map(osl_t *osh, void *va, uint size, int direction)
++{
++	int dir;
++	
++	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
++	return (pci_map_single(osh->pdev, va, size, dir));
++}
++
++void
++osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
++{
++	int dir;
++	
++	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
++	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
++}
++
++#if defined(BINOSL)
++void
++osl_assert(char *exp, char *file, int line)
++{
++	char tempbuf[255];
++
++	sprintf(tempbuf, "assertion \"%s\" failed: file \"%s\", line %d\n", exp, file, line);
++	panic(tempbuf);
++}
++#endif	/* BINOSL */
++
++void
++osl_delay(uint usec)
++{
++	uint d;
++
++	while (usec > 0) {
++		d = MIN(usec, 1000);
++		udelay(d);
++		usec -= d;
++	}
++}
++
++/*
++ * BINOSL selects the slightly slower function-call-based binary compatible osl.
++ */
++#ifdef BINOSL
++
++int
++osl_printf(const char *format, ...)
++{
++	va_list args;
++	char buf[1024];
++	int len;
++
++	/* format into a bounded local buffer, then hand the result to printk() */
++	va_start(args, format);
++	len = vsnprintf(buf, sizeof(buf), format, args);
++	va_end(args);
++
++	if (len > sizeof (buf)) {
++		printk("osl_printf: buffer overrun\n");
++		return (0);
++	}
++
++	return (printk(buf));
++}
++
++int
++osl_sprintf(char *buf, const char *format, ...)
++{
++	va_list args;
++	int rc;
++
++	va_start(args, format);
++	rc = vsprintf(buf, format, args);
++	va_end(args);
++	return (rc);
++}
++
++int
++osl_strcmp(const char *s1, const char *s2)
++{
++	return (strcmp(s1, s2));
++}
++
++int
++osl_strncmp(const char *s1, const char *s2, uint n)
++{
++	return (strncmp(s1, s2, n));
++}
++
++int
++osl_strlen(const char *s)
++{
++	return (strlen(s));
++}
++
++char*
++osl_strcpy(char *d, const char *s)
++{
++	return (strcpy(d, s));
++}
++
++char*
++osl_strncpy(char *d, const char *s, uint n)
++{
++	return (strncpy(d, s, n));
++}
++
++void
++bcopy(const void *src, void *dst, int len)
++{
++	memcpy(dst, src, len);
++}
++
++int
++bcmp(const void *b1, const void *b2, int len)
++{
++	return (memcmp(b1, b2, len));
++}
++
++void
++bzero(void *b, int len)
++{
++	memset(b, '\0', len);
++}
++
++uint32
++osl_readl(volatile uint32 *r)
++{
++	return (readl(r));
++}
++
++uint16
++osl_readw(volatile uint16 *r)
++{
++	return (readw(r));
++}
++
++uint8
++osl_readb(volatile uint8 *r)
++{
++	return (readb(r));
++}
++
++void
++osl_writel(uint32 v, volatile uint32 *r)
++{
++	writel(v, r);
++}
++
++void
++osl_writew(uint16 v, volatile uint16 *r)
++{
++	writew(v, r);
++}
++
++void
++osl_writeb(uint8 v, volatile uint8 *r)
++{
++	writeb(v, r);
++}
++
++void *
++osl_uncached(void *va)
++{
++#ifdef mips
++	return ((void*)KSEG1ADDR(va));
++#else
++	return ((void*)va);
++#endif
++}
++
++uint
++osl_getcycles(void)
++{
++	uint cycles;
++
++#if defined(mips)
++	cycles = read_c0_count() * 2;
++#elif defined(__i386__)
++	rdtscl(cycles);
++#else
++	cycles = 0;
++#endif
++	return cycles;
++}
++
++void *
++osl_reg_map(uint32 pa, uint size)
++{
++	return (ioremap_nocache((unsigned long)pa, (unsigned long)size));
++}
++
++void
++osl_reg_unmap(void *va)
++{
++	iounmap(va);
++}
++
++int
++osl_busprobe(uint32 *val, uint32 addr)
++{
++#ifdef mips
++	return get_dbe(*val, (uint32*)addr);
++#else
++	*val = readl(addr);
++	return 0;
++#endif
++}
++
++uchar*
++osl_pktdata(osl_t *osh, void *skb)
++{
++	return (((struct sk_buff*)skb)->data);
++}
++
++uint
++osl_pktlen(osl_t *osh, void *skb)
++{
++	return (((struct sk_buff*)skb)->len);
++}
++
++uint
++osl_pktheadroom(osl_t *osh, void *skb)
++{
++	return (uint) skb_headroom((struct sk_buff *) skb);
++}
++
++uint
++osl_pkttailroom(osl_t *osh, void *skb)
++{
++	return (uint) skb_tailroom((struct sk_buff *) skb);
++}
++
++void*
++osl_pktnext(osl_t *osh, void *skb)
++{
++	return (((struct sk_buff*)skb)->next);
++}
++
++void
++osl_pktsetnext(void *skb, void *x)
++{
++	((struct sk_buff*)skb)->next = (struct sk_buff*)x;
++}
++
++void
++osl_pktsetlen(osl_t *osh, void *skb, uint len)
++{
++	__skb_trim((struct sk_buff*)skb, len);
++}
++
++uchar*
++osl_pktpush(osl_t *osh, void *skb, int bytes)
++{
++	return (skb_push((struct sk_buff*)skb, bytes));
++}
++
++uchar*
++osl_pktpull(osl_t *osh, void *skb, int bytes)
++{
++	return (skb_pull((struct sk_buff*)skb, bytes));
++}
++
++void*
++osl_pktdup(osl_t *osh, void *skb)
++{
++	return (skb_clone((struct sk_buff*)skb, GFP_ATOMIC));
++}
++
++void*
++osl_pktcookie(void *skb)
++{
++	return ((void*)((struct sk_buff*)skb)->csum);
++}
++
++void
++osl_pktsetcookie(void *skb, void *x)
++{
++	((struct sk_buff*)skb)->csum = (uint)x;
++}
++
++void*
++osl_pktlink(void *skb)
++{
++	return (((struct sk_buff*)skb)->prev);
++}
++
++void
++osl_pktsetlink(void *skb, void *x)
++{
++	((struct sk_buff*)skb)->prev = (struct sk_buff*)x;
++}
++
++uint
++osl_pktprio(void *skb)
++{
++	return (((struct sk_buff*)skb)->priority);
++}
++
++void
++osl_pktsetprio(void *skb, uint x)
++{
++	((struct sk_buff*)skb)->priority = x;
++}
++
++
++#endif	/* BINOSL */
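
For illustration only (not part of the patch): a standalone sketch of the osl_error() idea -- driver error codes are small non-positive numbers used (by absolute value) to index a table of Linux errnos. The table entries below are made up for the example; the real mapping is the linuxbcmerrormap[] above.

    #include <stdio.h>
    #include <errno.h>

    /* made-up, shortened error map purely for illustration */
    static const int errmap[] = { 0, -EINVAL, -EINVAL, -E2BIG, -EBUSY, -ENOMEM };

    static int to_linux_error(int bcmerror)
    {
        int idx = -bcmerror;                        /* driver codes are <= 0 */

        if (bcmerror > 0)
            idx = 0;                                /* positive results mean success */
        else if (idx >= (int)(sizeof(errmap) / sizeof(errmap[0])))
            idx = 1;                                /* unknown code: generic error */
        return errmap[idx];
    }

    int main(void)
    {
        printf("driver error -4 maps to %d (-EBUSY here)\n", to_linux_error(-4));
        return 0;
    }
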
+diff -Naur linux.old/drivers/net/hnd/sbutils.c linux.dev/drivers/net/hnd/sbutils.c
+--- linux.old/drivers/net/hnd/sbutils.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/hnd/sbutils.c	2006-04-06 15:34:15.000000000 +0200
+@@ -0,0 +1,2837 @@
++/*
++ * Misc utility routines for accessing chip-specific features
++ * of the SiliconBackplane-based Broadcom chips.
++ *
++ * Copyright 2005, Broadcom Corporation
++ * All Rights Reserved.
++ * 
++ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
++ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
++ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
++ * $Id$
++ */
++
++#include <typedefs.h>
++#include <osl.h>
++#include <sbutils.h>
++#include <bcmutils.h>
++#include <bcmdevs.h>
++#include <sbconfig.h>
++#include <sbchipc.h>
++#include <sbpci.h>
++#include <sbpcie.h>
++#include <pcicfg.h>
++#include <sbpcmcia.h>
++#include <sbextif.h>
++#include <bcmsrom.h>
++
++/* debug/trace */
++#define	SB_ERROR(args)
++
++
++typedef uint32 (*sb_intrsoff_t)(void *intr_arg);
++typedef void (*sb_intrsrestore_t)(void *intr_arg, uint32 arg);
++typedef bool (*sb_intrsenabled_t)(void *intr_arg);
++
++/* misc sb info needed by some of the routines */
++typedef struct sb_info {
++
++	struct sb_pub  	sb;			/* backplane public state (must be first field of sb_info) */
++
++	void	*osh;			/* osl os handle */
++	void	*sdh;			/* bcmsdh handle */
++
++	void	*curmap;		/* current regs va */
++	void	*regs[SB_MAXCORES];	/* other regs va */
++
++	uint	curidx;			/* current core index */
++	uint	dev_coreid;		/* the core provides driver functions */
++
++	bool	memseg;			/* flag to toggle MEM_SEG register */
++
++	uint	gpioidx;		/* gpio control core index */
++	uint	gpioid;			/* gpio control coretype */
++
++	uint	numcores;		/* # discovered cores */
++	uint	coreid[SB_MAXCORES];	/* id of each core */
++
++	void	*intr_arg;		/* interrupt callback function arg */
++	sb_intrsoff_t		intrsoff_fn;		/* function turns chip interrupts off */
++	sb_intrsrestore_t	intrsrestore_fn;	/* function restore chip interrupts */
++	sb_intrsenabled_t	intrsenabled_fn;	/* function to check if chip interrupts are enabled */
++
++} sb_info_t;
++
++/* local prototypes */
++static sb_info_t * BCMINIT(sb_doattach)(sb_info_t *si, uint devid, osl_t *osh, void *regs,
++	uint bustype, void *sdh, char **vars, int *varsz);
++static void BCMINIT(sb_scan)(sb_info_t *si);
++static uint sb_corereg(sb_info_t *si, uint coreidx, uint regoff, uint mask, uint val);
++static uint _sb_coreidx(sb_info_t *si);
++static uint sb_findcoreidx(sb_info_t *si, uint coreid, uint coreunit);
++static uint BCMINIT(sb_pcidev2chip)(uint pcidev);
++static uint BCMINIT(sb_chip2numcores)(uint chip);
++static bool sb_ispcie(sb_info_t *si);
++static bool sb_find_pci_capability(sb_info_t *si, uint8 req_cap_id, uchar *buf, uint32 *buflen);
++static int sb_pci_fixcfg(sb_info_t *si);
++
++/* routines to access mdio slave device registers */
++static int sb_pcie_mdiowrite(sb_info_t *si,  uint physmedia, uint readdr, uint val);
++static void BCMINIT(sb_war30841)(sb_info_t *si);
++
++/* delay needed between the mdio control/ mdiodata register data access */
++#define PR28829_DELAY() OSL_DELAY(10)
++
++
++/* global variable to indicate reservation/release of GPIOs */
++static uint32 sb_gpioreservation = 0;
++
++#define	SB_INFO(sbh)	(sb_info_t*)sbh
++#define	SET_SBREG(sbh, r, mask, val)	W_SBREG((sbh), (r), ((R_SBREG((sbh), (r)) & ~(mask)) | (val)))
++#define	GOODCOREADDR(x)	(((x) >= SB_ENUM_BASE) && ((x) <= SB_ENUM_LIM) && ISALIGNED((x), SB_CORE_SIZE))
++#define	GOODREGS(regs)	((regs) && ISALIGNED((uintptr)(regs), SB_CORE_SIZE))
++#define	REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
++#define	GOODIDX(idx)	(((uint)idx) < SB_MAXCORES)
++#define	BADIDX		(SB_MAXCORES+1)
++#define	NOREV		-1
++
++#define PCI(si)		((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCI)) 
++#define PCIE(si)	((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCIE)) 
++
++/* sonicsrev */
++#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
++#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
++
++#define	R_SBREG(sbh, sbr)	sb_read_sbreg((sbh), (sbr))
++#define	W_SBREG(sbh, sbr, v)	sb_write_sbreg((sbh), (sbr), (v))
++#define	AND_SBREG(sbh, sbr, v)	W_SBREG((sbh), (sbr), (R_SBREG((sbh), (sbr)) & (v)))
++#define	OR_SBREG(sbh, sbr, v)	W_SBREG((sbh), (sbr), (R_SBREG((sbh), (sbr)) | (v)))
++
++/*
++ * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts before/
++ * after core switching, to avoid invalid register access inside the ISR.
++ */
++#define INTR_OFF(si, intr_val) \
++	if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
++		intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
++#define INTR_RESTORE(si, intr_val) \
++	if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
++		(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
++
++/* dynamic clock control defines */
++#define	LPOMINFREQ	25000			/* low power oscillator min */
++#define	LPOMAXFREQ	43000			/* low power oscillator max */
++#define	XTALMINFREQ	19800000		/* 20 MHz - 1% */
++#define	XTALMAXFREQ	20200000		/* 20 MHz + 1% */
++#define	PCIMINFREQ	25000000		/* 25 MHz */
++#define	PCIMAXFREQ	34000000		/* 33 MHz + fudge */
++
++#define	ILP_DIV_5MHZ	0			/* ILP = 5 MHz */
++#define	ILP_DIV_1MHZ	4			/* ILP = 1 MHz */
++
++#define MIN_DUMPBUFLEN  32	/* debug */
++
++/* different register spaces reachable through pcie indirect access */
++#define PCIE_CONFIGREGS 	1
++#define PCIE_PCIEREGS 		2
++
++/* GPIO Based LED powersave defines */
++#define DEFAULT_GPIO_ONTIME	10
++#define DEFAULT_GPIO_OFFTIME	90
++
++#define DEFAULT_GPIOTIMERVAL  ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
++
++static uint32
++sb_read_sbreg(sb_info_t *si, volatile uint32 *sbr)
++{
++	uint8 tmp;
++	uint32 val, intr_val = 0;
++
++
++	/*
++	 * compact flash only has an 11-bit address, while we need a 12-bit address.
++	 * MEM_SEG is OR'd with the other 11 address bits in hardware,
++	 * so we program MEM_SEG with the 12th bit when necessary (to access sb registers).
++	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
++	 */
++	if(si->memseg) {
++		INTR_OFF(si, intr_val);
++		tmp = 1;
++		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
++		sbr = (volatile uint32 *) ((uintptr) sbr & ~(1 << 11));	/* mask out bit 11 */
++	}
++
++	val = R_REG(sbr);
++
++	if(si->memseg) {
++		tmp = 0;
++		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
++		INTR_RESTORE(si, intr_val);
++	}
++
++	return (val);
++}
++
++static void
++sb_write_sbreg(sb_info_t *si, volatile uint32 *sbr, uint32 v)
++{
++	uint8 tmp;
++	volatile uint32 dummy;
++	uint32 intr_val = 0;
++
++
++	/*
++	 * compact flash only has an 11-bit address, while we need a 12-bit address.
++	 * MEM_SEG is OR'd with the other 11 address bits in hardware,
++	 * so we program MEM_SEG with the 12th bit when necessary (to access sb registers).
++	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
++	 */
++	if(si->memseg) {
++		INTR_OFF(si, intr_val);
++		tmp = 1;
++		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
++		sbr = (volatile uint32 *) ((uintptr) sbr & ~(1 << 11));	/* mask out bit 11 */
++	}
++
++	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
++#ifdef IL_BIGENDIAN
++		dummy = R_REG(sbr);
++		W_REG(((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
++		dummy = R_REG(sbr);
++		W_REG((volatile uint16 *)sbr, (uint16)(v & 0xffff));
++#else
++		dummy = R_REG(sbr);
++		W_REG((volatile uint16 *)sbr, (uint16)(v & 0xffff));
++		dummy = R_REG(sbr);
++		W_REG(((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
++#endif
++	} else
++		W_REG(sbr, v);
++
++	if(si->memseg) {
++		tmp = 0;
++		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
++		INTR_RESTORE(si, intr_val);
++	}
++}
++
++/*
++ * Allocate a sb handle.
++ * devid - pci device id (used to determine chip#)
++ * osh - opaque OS handle
++ * regs - virtual address of initial core registers
++ * bustype - pci/pcmcia/sb/sdio/etc
++ * vars - pointer to a pointer area for "environment" variables
++ * varsz - pointer to int to return the size of the vars
++ */
++sb_t * 
++BCMINITFN(sb_attach)(uint devid, osl_t *osh, void *regs,
++	uint bustype, void *sdh, char **vars, int *varsz)
++{
++	sb_info_t *si;
++
++	/* alloc sb_info_t */
++	if ((si = MALLOC(osh, sizeof (sb_info_t))) == NULL) {
++		SB_ERROR(("sb_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
++		return (NULL);
++	}
++
++	if (BCMINIT(sb_doattach)(si, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
++		MFREE(osh, si, sizeof (sb_info_t));
++		return (NULL);
++	}
++	return (sb_t *)si;
++}
++
++/* Using sb_kattach depends on SB_BUS support, either implicit    */
++/* (no limiting BCMBUSTYPE value) or explicit (value is SB_BUS).  */
++#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)
++
++/* global kernel resource */
++static sb_info_t ksi;
++
++/* generic kernel variant of sb_attach() */
++sb_t * 
++BCMINITFN(sb_kattach)()
++{
++	uint32 *regs;
++
++	if (ksi.curmap == NULL) {
++		uint32 cid;
++
++		regs = (uint32 *)REG_MAP(SB_ENUM_BASE, SB_CORE_SIZE);
++		cid = R_REG((uint32 *)regs);
++		if (((cid & CID_ID_MASK) == BCM4712_DEVICE_ID) &&
++		    ((cid & CID_PKG_MASK) != BCM4712LARGE_PKG_ID) &&
++		    ((cid & CID_REV_MASK) <= (3 << CID_REV_SHIFT))) {
++			uint32 *scc, val;
++
++			scc = (uint32 *)((uchar*)regs + OFFSETOF(chipcregs_t, slow_clk_ctl));
++			val = R_REG(scc);
++			SB_ERROR(("    initial scc = 0x%x\n", val));
++			val |= SCC_SS_XTAL;
++			W_REG(scc, val);
++		}
++
++		if (BCMINIT(sb_doattach)(&ksi, BCM4710_DEVICE_ID, NULL, (void*)regs,
++			SB_BUS, NULL, NULL, NULL) == NULL) {
++			return NULL;
++		}
++	}
++
++	return (sb_t *)&ksi;
++}
++#endif
++
++static sb_info_t  * 
++BCMINITFN(sb_doattach)(sb_info_t *si, uint devid, osl_t *osh, void *regs,
++	uint bustype, void *sdh, char **vars, int *varsz)
++{
++	uint origidx;
++	chipcregs_t *cc;
++	sbconfig_t *sb;
++	uint32 w;
++
++	ASSERT(GOODREGS(regs));
++
++	bzero((uchar*)si, sizeof (sb_info_t));
++
++	si->sb.buscoreidx = si->gpioidx = BADIDX;
++
++	si->osh = osh;
++	si->curmap = regs;
++	si->sdh = sdh;
++
++	/* check to see if we are an sb core mimicking a pci core */
++	if (bustype == PCI_BUS) {
++		if (OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof (uint32)) == 0xffffffff)
++			bustype = SB_BUS;
++		else
++			bustype = PCI_BUS;
++	}
++
++	si->sb.bustype = bustype;
++	if (si->sb.bustype != BUSTYPE(si->sb.bustype)) {
++		SB_ERROR(("sb_doattach: bus type %d does not match configured bus type %d\n",
++			  si->sb.bustype, BUSTYPE(si->sb.bustype)));
++		return NULL;
++	}
++
++	/* need to set memseg flag for CF card first before any sb registers access */
++	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS)
++		si->memseg = TRUE;
++
++	/* kludge to enable the clock on the 4306 which lacks a slowclock */
++	if (BUSTYPE(si->sb.bustype) == PCI_BUS)
++		sb_clkctl_xtal(&si->sb, XTAL|PLL, ON);
++
++	if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
++		w = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN, sizeof (uint32));
++		if (!GOODCOREADDR(w))
++			OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, sizeof (uint32), SB_ENUM_BASE);
++	}
++
++	/* initialize current core index value */
++	si->curidx = _sb_coreidx(si);
++
++	if (si->curidx == BADIDX) {
++		SB_ERROR(("sb_doattach: bad core index\n"));
++		return NULL;
++	}
++
++	/* get sonics backplane revision */
++	sb = REGS2SB(si->curmap);
++	si->sb.sonicsrev = (R_SBREG(si, &(sb)->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
++
++	/* keep and reuse the initial register mapping */
++	origidx = si->curidx;
++	if (BUSTYPE(si->sb.bustype) == SB_BUS)
++		si->regs[origidx] = regs;
++
++	/* is core-0 a chipcommon core? */
++	si->numcores = 1;
++	cc = (chipcregs_t*) sb_setcoreidx(&si->sb, 0);
++	if (sb_coreid(&si->sb) != SB_CC)
++		cc = NULL;
++
++	/* determine chip id and rev */
++	if (cc) {
++		/* chip common core found! */
++		si->sb.chip = R_REG(&cc->chipid) & CID_ID_MASK;
++		si->sb.chiprev = (R_REG(&cc->chipid) & CID_REV_MASK) >> CID_REV_SHIFT;
++		si->sb.chippkg = (R_REG(&cc->chipid) & CID_PKG_MASK) >> CID_PKG_SHIFT;
++	} else {
++		/* The only pcmcia chip without a chipcommon core is a 4301 */
++		if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS)
++			devid = BCM4301_DEVICE_ID;
++
++		/* no chip common core -- must convert device id to chip id */
++		if ((si->sb.chip = BCMINIT(sb_pcidev2chip)(devid)) == 0) {
++			SB_ERROR(("sb_doattach: unrecognized device id 0x%04x\n", devid));
++			sb_setcoreidx(&si->sb, origidx);
++			return NULL;
++		}
++	}
++
++	/* get chipcommon rev */
++	si->sb.ccrev = cc ? (int)sb_corerev(&si->sb) : NOREV;
++
++	/* determine numcores */
++	if (cc && ((si->sb.ccrev == 4) || (si->sb.ccrev >= 6)))
++		si->numcores = (R_REG(&cc->chipid) & CID_CC_MASK) >> CID_CC_SHIFT;
++	else
++		si->numcores = BCMINIT(sb_chip2numcores)(si->sb.chip);
++
++	/* return to original core */
++	sb_setcoreidx(&si->sb, origidx);
++
++	/* sanity checks */
++	ASSERT(si->sb.chip);
++
++	/* scan for cores */
++	BCMINIT(sb_scan)(si);
++
++	/* fixup necessary chip/core configurations */
++	if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
++		if (sb_pci_fixcfg(si)) {
++			SB_ERROR(("sb_doattach: sb_pci_fixcfg failed\n"));
++			return NULL;
++		}
++	}
++	
++	/* srom_var_init() depends on sb_scan() info */
++	if (srom_var_init(si, si->sb.bustype, si->curmap, osh, vars, varsz)) {
++		SB_ERROR(("sb_doattach: srom_var_init failed: bad srom\n"));
++		return (NULL);
++	}
++	
++	if (cc == NULL) {
++		/*
++		 * The chip revision number is hardwired into all
++		 * of the pci function config rev fields and is
++		 * independent from the individual core revision numbers.
++		 * For example, the "A0" silicon of each chip is chip rev 0.
++		 * For PCMCIA we get it from the CIS instead.
++		 */
++		if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
++			ASSERT(vars);
++			si->sb.chiprev = getintvar(*vars, "chiprev");
++		} else if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
++			w = OSL_PCI_READ_CONFIG(osh, PCI_CFG_REV, sizeof (uint32));
++			si->sb.chiprev = w & 0xff;
++		} else
++			si->sb.chiprev = 0;
++	}
++
++	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
++		w = getintvar(*vars, "regwindowsz");
++		si->memseg = (w <= CFTABLE_REGWIN_2K) ? TRUE : FALSE;
++	}
++
++	/* gpio control core is required */
++	if (!GOODIDX(si->gpioidx)) {
++		SB_ERROR(("sb_doattach: gpio control core not found\n"));
++		return NULL;
++	}
++
++	/* get boardtype and boardrev */
++	switch (BUSTYPE(si->sb.bustype)) {
++	case PCI_BUS:
++		/* do a pci config read to get subsystem id and subvendor id */
++		w = OSL_PCI_READ_CONFIG(osh, PCI_CFG_SVID, sizeof (uint32));
++		si->sb.boardvendor = w & 0xffff;
++		si->sb.boardtype = (w >> 16) & 0xffff;
++		break;
++
++	case PCMCIA_BUS:
++	case SDIO_BUS:
++		si->sb.boardvendor = getintvar(*vars, "manfid");
++		si->sb.boardtype = getintvar(*vars, "prodid");
++		break;
++
++	case SB_BUS:
++	case JTAG_BUS:
++		si->sb.boardvendor = VENDOR_BROADCOM;
++		if ((si->sb.boardtype = getintvar(NULL, "boardtype")) == 0)
++			si->sb.boardtype = 0xffff;
++		break;
++	}
++
++	if (si->sb.boardtype == 0) {
++		SB_ERROR(("sb_doattach: unknown board type\n"));
++		ASSERT(si->sb.boardtype);
++	}
++
++	/* setup the GPIO based LED powersave register */
++	if (si->sb.ccrev >= 16) {
++		w = getintvar(*vars, "gpiotimerval");
++		if (!w)
++			w = DEFAULT_GPIOTIMERVAL; 
++		sb_corereg(si, 0, OFFSETOF(chipcregs_t, gpiotimerval), ~0, w);
++	}
++
++
++	return (si);
++}
++
++uint
++sb_coreid(sb_t *sbh)
++{
++	sb_info_t *si;
++	sbconfig_t *sb;
++
++	si = SB_INFO(sbh);
++	sb = REGS2SB(si->curmap);
++
++	return ((R_SBREG(si, &(sb)->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
++}
++
++uint
++sb_coreidx(sb_t *sbh)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++	return (si->curidx);
++}
++
++/* return current index of core */
++static uint
++_sb_coreidx(sb_info_t *si)
++{
++	sbconfig_t *sb;
++	uint32 sbaddr = 0;
++
++	ASSERT(si);
++
++	switch (BUSTYPE(si->sb.bustype)) {
++	case SB_BUS:
++		sb = REGS2SB(si->curmap);
++		sbaddr = sb_base(R_SBREG(si, &sb->sbadmatch0));
++		break;
++
++	case PCI_BUS:
++		sbaddr = OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof (uint32));
++		break;
++
++	case PCMCIA_BUS: {
++		uint8 tmp = 0;
++
++		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
++		sbaddr  = (uint)tmp << 12;
++		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
++		sbaddr |= (uint)tmp << 16;
++		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
++		sbaddr |= (uint)tmp << 24;
++		break;
++	}
++
++#ifdef BCMJTAG
++	case JTAG_BUS:
++		sbaddr = (uint32)si->curmap;
++		break;
++#endif	/* BCMJTAG */
++
++	default:
++		ASSERT(0);
++	}
++
++	if (!GOODCOREADDR(sbaddr))
++		return BADIDX;
++
++	return ((sbaddr - SB_ENUM_BASE) / SB_CORE_SIZE);
++}
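
For illustration only (not part of the patch): on the PCI path the core index is simply the BAR0 window address relative to the backplane enumeration base. SB_ENUM_BASE and SB_CORE_SIZE come from headers not shown here; the 0x18000000 base and 4 KB window used below are assumptions made only for the arithmetic.

    #include <stdio.h>

    #define SB_ENUM_BASE 0x18000000u    /* assumed backplane enumeration base */
    #define SB_CORE_SIZE 0x1000u        /* assumed per-core register window (4 KB) */

    int main(void)
    {
        unsigned int sbaddr = 0x18004000u;    /* hypothetical BAR0 window value */

        printf("core index = %u\n", (sbaddr - SB_ENUM_BASE) / SB_CORE_SIZE);   /* 4 */
        return 0;
    }
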
++
++uint
++sb_corevendor(sb_t *sbh)
++{
++	sb_info_t *si;
++	sbconfig_t *sb;
++
++	si = SB_INFO(sbh);
++	sb = REGS2SB(si->curmap);
++
++	return ((R_SBREG(si, &(sb)->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
++}
++
++uint
++sb_corerev(sb_t *sbh)
++{
++	sb_info_t *si;
++	sbconfig_t *sb;
++	uint sbidh;
++
++	si = SB_INFO(sbh);
++	sb = REGS2SB(si->curmap);
++	sbidh = R_SBREG(si, &(sb)->sbidhigh);
++
++	return (SBCOREREV(sbidh));
++}
++
++void *
++sb_osh(sb_t *sbh)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++	return si->osh;
++}
++
++#define	SBTML_ALLOW	(SBTML_PE | SBTML_FGC | SBTML_FL_MASK)
++
++/* set/clear sbtmstatelow core-specific flags */
++uint32
++sb_coreflags(sb_t *sbh, uint32 mask, uint32 val)
++{
++	sb_info_t *si;
++	sbconfig_t *sb;
++	uint32 w;
++
++	si = SB_INFO(sbh);
++	sb = REGS2SB(si->curmap);
++
++	ASSERT((val & ~mask) == 0);
++	ASSERT((mask & ~SBTML_ALLOW) == 0);
++
++	/* mask and set */
++	if (mask || val) {
++		w = (R_SBREG(si, &sb->sbtmstatelow) & ~mask) | val;
++		W_SBREG(si, &sb->sbtmstatelow, w);
++	}
++
++	/* return the new value */
++	return (R_SBREG(si, &sb->sbtmstatelow) & SBTML_ALLOW);
++}
++
++/* set/clear sbtmstatehigh core-specific flags */
++uint32
++sb_coreflagshi(sb_t *sbh, uint32 mask, uint32 val)
++{
++	sb_info_t *si;
++	sbconfig_t *sb;
++	uint32 w;
++
++	si = SB_INFO(sbh);
++	sb = REGS2SB(si->curmap);
++
++	ASSERT((val & ~mask) == 0);
++	ASSERT((mask & ~SBTMH_FL_MASK) == 0);
++
++	/* mask and set */
++	if (mask || val) {
++		w = (R_SBREG(si, &sb->sbtmstatehigh) & ~mask) | val;
++		W_SBREG(si, &sb->sbtmstatehigh, w);
++	}
++
++	/* return the new value */
++	return (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_FL_MASK);
++}
++
++/* caller needs to take care of core-specific bist hazards */
++int
++sb_corebist(sb_t *sbh, uint coreid, uint coreunit)
++{
++	uint32 sblo;
++	uint coreidx;
++	sb_info_t *si;
++	int result = 0;
++
++	si = SB_INFO(sbh);
++
++	coreidx = sb_findcoreidx(si, coreid, coreunit);
++	if (!GOODIDX(coreidx))
++		result = BCME_ERROR;
++	else {
++		sblo = sb_corereg(si, coreidx, SBCONFIGOFF + OFFSETOF(sbconfig_t, sbtmstatelow), 0, 0);
++		sb_corereg(si, coreidx, SBCONFIGOFF + OFFSETOF(sbconfig_t, sbtmstatelow), ~0, (sblo | SBTML_FGC | SBTML_BE));
++		
++		SPINWAIT(((sb_corereg(si, coreidx, SBCONFIGOFF + OFFSETOF(sbconfig_t, sbtmstatehigh), 0, 0) & SBTMH_BISTD) == 0), 100000);
++	
++		if (sb_corereg(si, coreidx, SBCONFIGOFF + OFFSETOF(sbconfig_t, sbtmstatehigh), 0, 0) & SBTMH_BISTF)
++			result = BCME_ERROR;
++
++		sb_corereg(si, coreidx, SBCONFIGOFF + OFFSETOF(sbconfig_t, sbtmstatelow), ~0, sblo);
++	}
++
++	return result;
++}
++
++bool
++sb_iscoreup(sb_t *sbh)
++{
++	sb_info_t *si;
++	sbconfig_t *sb;
++
++	si = SB_INFO(sbh);
++	sb = REGS2SB(si->curmap);
++
++	return ((R_SBREG(si, &(sb)->sbtmstatelow) & (SBTML_RESET | SBTML_REJ_MASK | SBTML_CLK)) == SBTML_CLK);
++}
++
++/*
++ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
++ * switch back to the original core, and return the new value.
++ */
++static uint
++sb_corereg(sb_info_t *si, uint coreidx, uint regoff, uint mask, uint val)
++{
++	uint origidx;
++	uint32 *r;
++	uint w;
++	uint intr_val = 0;
++
++	ASSERT(GOODIDX(coreidx));
++	ASSERT(regoff < SB_CORE_SIZE);
++	ASSERT((val & ~mask) == 0);
++
++	INTR_OFF(si, intr_val);
++
++	/* save current core index */
++	origidx = sb_coreidx(&si->sb);
++
++	/* switch core */
++	r = (uint32*) ((uchar*) sb_setcoreidx(&si->sb, coreidx) + regoff);
++
++	/* mask and set */
++	if (mask || val) {
++		if (regoff >= SBCONFIGOFF) {
++			w = (R_SBREG(si, r) & ~mask) | val;
++			W_SBREG(si, r, w);
++		} else {
++			w = (R_REG(r) & ~mask) | val;
++			W_REG(r, w);
++		}
++	}
++
++	/* readback */
++	if (regoff >= SBCONFIGOFF)
++		w = R_SBREG(si, r);
++	else
++		w = R_REG(r);
++
++	/* restore core index */
++	if (origidx != coreidx)
++		sb_setcoreidx(&si->sb, origidx);
++
++	INTR_RESTORE(si, intr_val);
++	return (w);
++}
++
++#define DWORD_ALIGN(x)  (x & ~(0x03))
++#define BYTE_POS(x) (x & 0x3)
++#define WORD_POS(x) (x & 0x1)
++
++#define BYTE_SHIFT(x)  (8 * BYTE_POS(x))
++#define WORD_SHIFT(x)  (16 * WORD_POS(x))
++
++#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF)
++#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF)
++
++#define read_pci_cfg_byte(a) \
++	(BYTE_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xff)
++
++#define read_pci_cfg_write(a) \
++	(WORD_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xffff)
++
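
For illustration only (not part of the patch): the macros above fetch a whole dword of config space and shift the byte of interest out of it. A standalone sketch with a made-up register value:

    #include <stdio.h>

    #define DWORD_ALIGN(x) ((x) & ~0x03u)
    #define BYTE_SHIFT(x)  (8 * ((x) & 0x3u))
    #define BYTE_VAL(a, x) (((a) >> BYTE_SHIFT(x)) & 0xFF)

    int main(void)
    {
        unsigned int dword_at_0x0c = 0x00800000u;   /* pretend config read at offset 0x0c */
        unsigned int offset = 0x0e;                 /* the byte we actually want */

        printf("aligned offset 0x%02x, byte 0x%02x\n",
               DWORD_ALIGN(offset), BYTE_VAL(dword_at_0x0c, offset));   /* 0x0c, 0x80 */
        return 0;
    }
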
++
++/* return TRUE if requested capability exists in the PCI config space */
++static bool 
++sb_find_pci_capability(sb_info_t *si, uint8 req_cap_id, uchar *buf, uint32 *buflen)
++{
++	uint8 cap_id;
++	uint8 cap_ptr;
++	uint32 	bufsize;
++	uint8 byte_val;
++
++	if (BUSTYPE(si->sb.bustype) != PCI_BUS)
++		return FALSE;
++
++	/* check for Header type 0*/
++	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
++	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)
++		return FALSE;
++
++	/* check if the capability pointer field exists */
++	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
++	if (!(byte_val & PCI_CAPPTR_PRESENT))
++		return FALSE;
++
++	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
++	/* check if the capability pointer is 0x00 */
++	if (cap_ptr == 0x00)
++		return FALSE;
++
++
++	/* loop through the capability list and see if the pcie capability exists */
++
++	cap_id = read_pci_cfg_byte(cap_ptr);
++
++	while (cap_id != req_cap_id) {
++		cap_ptr = read_pci_cfg_byte((cap_ptr+1));
++		if (cap_ptr == 0x00) break;
++		cap_id = read_pci_cfg_byte(cap_ptr);
++	}
++	if (cap_id != req_cap_id) {
++		return FALSE;
++	}
++	/* found the caller requested capability */
++	if ((buf != NULL) &&  (buflen != NULL)) {
++		bufsize = *buflen;
++		if (!bufsize) goto end;
++		*buflen = 0;
++		/* copy the capability data, excluding the cap ID and next pointer */
++		cap_ptr += 2;
++		if ((bufsize + cap_ptr)  > SZPCR)
++			bufsize = SZPCR - cap_ptr;
++		*buflen = bufsize;
++		while (bufsize--) {
++			*buf = read_pci_cfg_byte(cap_ptr);
++			cap_ptr++;
++			buf++;
++		}
++	}
++end:
++	return TRUE;
++}
++
++/* return TRUE if the PCIE capability exists in the pci config space */
++static bool
++sb_ispcie(sb_info_t *si)
++{
++	return(sb_find_pci_capability(si, PCI_CAP_PCIECAP_ID, NULL, NULL));
++}
++
++/* scan the sb enumerated space to identify all cores */
++static void
++BCMINITFN(sb_scan)(sb_info_t *si)
++{
++	uint origidx;
++	uint i;
++	bool pci;
++	bool pcie;
++	uint pciidx;
++	uint pcieidx;
++	uint pcirev;
++	uint pcierev;
++
++
++
++	/* numcores should already be set */
++	ASSERT((si->numcores > 0) && (si->numcores <= SB_MAXCORES));
++
++	/* save current core index */
++	origidx = sb_coreidx(&si->sb);
++
++	si->sb.buscorerev = NOREV;
++	si->sb.buscoreidx = BADIDX;
++
++	si->gpioidx = BADIDX;
++
++	pci = pcie = FALSE;
++	pcirev = pcierev = NOREV;
++	pciidx = pcieidx = BADIDX;
++
++	for (i = 0; i < si->numcores; i++) {
++		sb_setcoreidx(&si->sb, i);
++		si->coreid[i] = sb_coreid(&si->sb);
++
++		if (si->coreid[i] == SB_PCI) { 
++			pciidx = i;
++			pcirev = sb_corerev(&si->sb);
++			pci = TRUE;
++		} else if (si->coreid[i] == SB_PCIE) {
++			pcieidx = i;
++			pcierev = sb_corerev(&si->sb);
++			pcie = TRUE;
++		} else if (si->coreid[i] == SB_PCMCIA) {
++			si->sb.buscorerev = sb_corerev(&si->sb);
++			si->sb.buscoretype = si->coreid[i];
++			si->sb.buscoreidx = i; 
++		}
++	}
++	if (pci && pcie) {
++		if (sb_ispcie(si))
++			pci = FALSE;
++		else
++			pcie = FALSE;
++	}
++	if (pci) {
++		si->sb.buscoretype = SB_PCI;
++		si->sb.buscorerev = pcirev; 
++		si->sb.buscoreidx = pciidx; 
++	}
++	else if (pcie) {
++		si->sb.buscoretype = SB_PCIE;
++		si->sb.buscorerev = pcierev; 
++		si->sb.buscoreidx = pcieidx; 
++	}
++
++	/*
++	 * Find the gpio "controlling core" type and index.
++	 * Precedence:
++	 * - if there's a chip common core - use that
++	 * - else if there's a pci core (rev >= 2) - use that
++	 * - else there had better be an extif core (4710 only)
++	 */
++	if (GOODIDX(sb_findcoreidx(si, SB_CC, 0))) {
++		si->gpioidx = sb_findcoreidx(si, SB_CC, 0);
++		si->gpioid = SB_CC;
++	} else if (PCI(si) && (si->sb.buscorerev >= 2)) {
++		si->gpioidx = si->sb.buscoreidx;
++		si->gpioid = SB_PCI;
++	} else if (sb_findcoreidx(si, SB_EXTIF, 0)) {
++		si->gpioidx = sb_findcoreidx(si, SB_EXTIF, 0);
++		si->gpioid = SB_EXTIF;
++	} else
++		ASSERT(si->gpioidx != BADIDX);
++
++	/* return to original core index */
++	sb_setcoreidx(&si->sb, origidx);
++}
++
++/* may be called with core in reset */
++void
++sb_detach(sb_t *sbh)
++{
++	sb_info_t *si;
++	uint idx;
++
++	si = SB_INFO(sbh);
++
++	if (si == NULL)
++		return;
++
++	if (BUSTYPE(si->sb.bustype) == SB_BUS)
++		for (idx = 0; idx < SB_MAXCORES; idx++)
++			if (si->regs[idx]) {
++				REG_UNMAP(si->regs[idx]);
++				si->regs[idx] = NULL;
++			}
++
++	if (si != &ksi)
++		MFREE(si->osh, si, sizeof (sb_info_t));
++}
++
++/* use pci dev id to determine chip id for chips not having a chipcommon core */
++static uint
++BCMINITFN(sb_pcidev2chip)(uint pcidev)
++{
++	if ((pcidev >= BCM4710_DEVICE_ID) && (pcidev <= BCM47XX_USB_ID))
++		return (BCM4710_DEVICE_ID);
++	if ((pcidev >= BCM4402_DEVICE_ID) && (pcidev <= BCM4402_V90_ID))
++		return (BCM4402_DEVICE_ID);
++	if (pcidev == BCM4401_ENET_ID)
++		return (BCM4402_DEVICE_ID);
++	if ((pcidev >= BCM4307_V90_ID) && (pcidev <= BCM4307_D11B_ID))
++		return (BCM4307_DEVICE_ID);
++	if (pcidev == BCM4301_DEVICE_ID)
++		return (BCM4301_DEVICE_ID);
++
++	return (0);
++}
++
++/* convert chip number to number of i/o cores */
++static uint
++BCMINITFN(sb_chip2numcores)(uint chip)
++{
++	if (chip == BCM4710_DEVICE_ID)
++		return (9);
++	if (chip == BCM4402_DEVICE_ID)
++		return (3);
++	if ((chip == BCM4301_DEVICE_ID) || (chip == BCM4307_DEVICE_ID))
++		return (5);
++	if (chip == BCM4306_DEVICE_ID)	/* < 4306c0 */
++		return (6);
++	if (chip == BCM4704_DEVICE_ID)
++		return (9);
++	if (chip == BCM5365_DEVICE_ID)
++		return (7);
++
++	SB_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", chip));
++	ASSERT(0);
++	return (1);
++}
++
++/* return index of coreid or BADIDX if not found */
++static uint
++sb_findcoreidx( sb_info_t *si, uint coreid, uint coreunit)
++{
++	uint found;
++	uint i;
++
++	found = 0;
++
++	for (i = 0; i < si->numcores; i++)
++		if (si->coreid[i] == coreid) {
++			if (found == coreunit)
++				return (i);
++			found++;
++		}
++
++	return (BADIDX);
++}
++
++/* 
++ * this function changes logical "focus" to the indicated core;
++ * it must be called with interrupts off.
++ * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
++ */
++void*
++sb_setcoreidx(sb_t *sbh, uint coreidx)
++{
++	sb_info_t *si;
++	uint32 sbaddr;
++	uint8 tmp;
++
++	si = SB_INFO(sbh);
++
++	if (coreidx >= si->numcores)
++		return (NULL);
++	
++	/*
++	 * If the user has provided an interrupt mask enabled function,
++	 * then assert interrupts are disabled before switching the core.
++	 */
++	ASSERT((si->intrsenabled_fn == NULL) || !(*(si)->intrsenabled_fn)((si)->intr_arg));
++
++	sbaddr = SB_ENUM_BASE + (coreidx * SB_CORE_SIZE);
++
++	switch (BUSTYPE(si->sb.bustype)) {
++	case SB_BUS:
++		/* map new one */
++		if (!si->regs[coreidx]) {
++			si->regs[coreidx] = (void*)REG_MAP(sbaddr, SB_CORE_SIZE);
++			ASSERT(GOODREGS(si->regs[coreidx]));
++		}
++		si->curmap = si->regs[coreidx];
++		break;
++
++	case PCI_BUS:
++		/* point bar0 window */
++		OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, 4, sbaddr);
++		break;
++
++	case PCMCIA_BUS:
++		tmp = (sbaddr >> 12) & 0x0f;
++		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
++		tmp = (sbaddr >> 16) & 0xff;
++		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
++		tmp = (sbaddr >> 24) & 0xff;
++		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
++		break;
++#ifdef BCMJTAG
++	case JTAG_BUS:
++		/* map new one */
++		if (!si->regs[coreidx]) {
++			si->regs[coreidx] = (void *)sbaddr;
++			ASSERT(GOODREGS(si->regs[coreidx]));
++		}
++		si->curmap = si->regs[coreidx];
++		break;
++#endif	/* BCMJTAG */
++	}
++
++	si->curidx = coreidx;
++
++	return (si->curmap);
++}
++
++/* 
++ * this function changes logical "focus" to the indicated core;
++ * it must be called with interrupts off.
++ * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
++ */
++void*
++sb_setcore(sb_t *sbh, uint coreid, uint coreunit)
++{
++	sb_info_t *si;
++	uint idx;
++
++	si = SB_INFO(sbh);
++	idx = sb_findcoreidx(si, coreid, coreunit);
++	if (!GOODIDX(idx))
++		return (NULL);
++
++	return (sb_setcoreidx(sbh, idx));
++}
++
++/* return chip number */
++uint
++BCMINITFN(sb_chip)(sb_t *sbh)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++	return (si->sb.chip);
++}
++
++/* return chip revision number */
++uint
++BCMINITFN(sb_chiprev)(sb_t *sbh)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++	return (si->sb.chiprev);
++}
++
++/* return chip common revision number */
++uint
++BCMINITFN(sb_chipcrev)(sb_t *sbh)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++	return (si->sb.ccrev);
++}
++
++/* return chip package option */
++uint
++BCMINITFN(sb_chippkg)(sb_t *sbh)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++	return (si->sb.chippkg);
++}
++
++/* return PCI core rev. */
++uint
++BCMINITFN(sb_pcirev)(sb_t *sbh)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++	return (si->sb.buscorerev);
++}
++
++bool
++BCMINITFN(sb_war16165)(sb_t *sbh)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++
++	return (PCI(si) && (si->sb.buscorerev <= 10));
++}
++
++static void 
++BCMINITFN(sb_war30841)(sb_info_t *si)
++{
++	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
++	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
++	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
++}
++
++/* return PCMCIA core rev. */
++uint
++BCMINITFN(sb_pcmciarev)(sb_t *sbh)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++	return (si->sb.buscorerev);
++}
++
++/* return board vendor id */
++uint
++BCMINITFN(sb_boardvendor)(sb_t *sbh)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++	return (si->sb.boardvendor);
++}
++
++/* return boardtype */
++uint
++BCMINITFN(sb_boardtype)(sb_t *sbh)
++{
++	sb_info_t *si;
++	char *var;
++
++	si = SB_INFO(sbh);
++
++	if (BUSTYPE(si->sb.bustype) == SB_BUS && si->sb.boardtype == 0xffff) {
++		/* boardtype format is a hex string */
++		si->sb.boardtype = getintvar(NULL, "boardtype");
++
++		/* backward compatibility for older boardtype string format */
++		if ((si->sb.boardtype == 0) && (var = getvar(NULL, "boardtype"))) {
++			if (!strcmp(var, "bcm94710dev"))
++				si->sb.boardtype = BCM94710D_BOARD;
++			else if (!strcmp(var, "bcm94710ap"))
++				si->sb.boardtype = BCM94710AP_BOARD;
++			else if (!strcmp(var, "bu4710"))
++				si->sb.boardtype = BU4710_BOARD;
++			else if (!strcmp(var, "bcm94702mn"))
++				si->sb.boardtype = BCM94702MN_BOARD;
++			else if (!strcmp(var, "bcm94710r1"))
++				si->sb.boardtype = BCM94710R1_BOARD;
++			else if (!strcmp(var, "bcm94710r4"))
++				si->sb.boardtype = BCM94710R4_BOARD;
++			else if (!strcmp(var, "bcm94702cpci"))
++				si->sb.boardtype = BCM94702CPCI_BOARD;
++			else if (!strcmp(var, "bcm95380_rr"))
++				si->sb.boardtype = BCM95380RR_BOARD;
++		}
++	}
++
++	return (si->sb.boardtype);
++}
++
++/* return bus type of sbh device */
++uint
++sb_bus(sb_t *sbh)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++	return (si->sb.bustype);
++}
++
++/* return bus core type */
++uint
++sb_buscoretype(sb_t *sbh)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++
++	return (si->sb.buscoretype);
++}
++
++/* return bus core revision */
++uint
++sb_buscorerev(sb_t *sbh)
++{
++	sb_info_t *si;
++	si = SB_INFO(sbh);
++
++	return (si->sb.buscorerev);
++}
++
++/* return list of found cores */
++uint
++sb_corelist(sb_t *sbh, uint coreid[])
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++
++	bcopy((uchar*)si->coreid, (uchar*)coreid, (si->numcores * sizeof (uint)));
++	return (si->numcores);
++}
++
++/* return current register mapping */
++void *
++sb_coreregs(sb_t *sbh)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++	ASSERT(GOODREGS(si->curmap));
++
++	return (si->curmap);
++}
++
++
++/* do buffered registers update */
++void
++sb_commit(sb_t *sbh)
++{
++	sb_info_t *si;
++	uint origidx;
++	uint intr_val = 0;
++
++	si = SB_INFO(sbh);
++
++	origidx = si->curidx;
++	ASSERT(GOODIDX(origidx));
++
++	INTR_OFF(si, intr_val);
++
++	/* switch over to chipcommon core if there is one, else use pci */
++	if (si->sb.ccrev != NOREV) {
++		chipcregs_t *ccregs = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);
++
++		/* do the buffer registers update */
++		W_REG(&ccregs->broadcastaddress, SB_COMMIT);
++		W_REG(&ccregs->broadcastdata, 0x0);
++	} else if (PCI(si)) {
++		sbpciregs_t *pciregs = (sbpciregs_t *)sb_setcore(sbh, SB_PCI, 0);
++
++		/* do the buffer registers update */
++		W_REG(&pciregs->bcastaddr, SB_COMMIT);
++		W_REG(&pciregs->bcastdata, 0x0);
++	} else
++		ASSERT(0);
++
++	/* restore core index */
++	sb_setcoreidx(sbh, origidx);
++	INTR_RESTORE(si, intr_val);
++}
++
++/* reset and re-enable a core */
++void
++sb_core_reset(sb_t *sbh, uint32 bits)
++{
++	sb_info_t *si;
++	sbconfig_t *sb;
++	volatile uint32 dummy;
++
++	si = SB_INFO(sbh);
++	ASSERT(GOODREGS(si->curmap));
++	sb = REGS2SB(si->curmap);
++
++	/*
++	 * Must do the disable sequence first to work for arbitrary current core state.
++	 */
++	sb_core_disable(sbh, bits);
++
++	/*
++	 * Now do the initialization sequence.
++	 */
++
++	/* set reset while enabling the clock and forcing them on throughout the core */
++	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | SBTML_RESET | bits));
++	dummy = R_SBREG(si, &sb->sbtmstatelow);
++	OSL_DELAY(1);
++
++	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_SERR) {
++		W_SBREG(si, &sb->sbtmstatehigh, 0);
++	}
++	if ((dummy = R_SBREG(si, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
++		AND_SBREG(si, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
++	}
++
++	/* clear reset and allow it to propagate throughout the core */
++	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | bits));
++	dummy = R_SBREG(si, &sb->sbtmstatelow);
++	OSL_DELAY(1);
++
++	/* leave clock enabled */
++	W_SBREG(si, &sb->sbtmstatelow, (SBTML_CLK | bits));
++	dummy = R_SBREG(si, &sb->sbtmstatelow);
++	OSL_DELAY(1);
++}
++
++void
++sb_core_tofixup(sb_t *sbh)
++{
++	sb_info_t *si;
++	sbconfig_t *sb;
++
++	si = SB_INFO(sbh);
++
++	if ( (BUSTYPE(si->sb.bustype) != PCI_BUS) || PCIE(si) || (PCI(si) && (si->sb.buscorerev >= 5)) )
++		return;
++
++	ASSERT(GOODREGS(si->curmap));
++	sb = REGS2SB(si->curmap);
++
++	if (BUSTYPE(si->sb.bustype) == SB_BUS) {
++		SET_SBREG(si, &sb->sbimconfiglow,
++			  SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
++			  (0x5 << SBIMCL_RTO_SHIFT) | 0x3);
++	} else {
++		if (sb_coreid(sbh) == SB_PCI) {
++			SET_SBREG(si, &sb->sbimconfiglow,
++				  SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
++				  (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
++		} else {
++			SET_SBREG(si, &sb->sbimconfiglow, (SBIMCL_RTO_MASK | SBIMCL_STO_MASK), 0);
++		}
++	}
++
++	sb_commit(sbh);
++}
++
++/*
++ * Set the initiator timeout for the "master core".
++ * The master core is defined to be the core in control
++ * of the chip and so it issues accesses to non-memory
++ * locations (because of DMA, *any* core can access memory).
++ *
++ * The routine uses the bus to decide who is the master:
++ *	SB_BUS => mips
++ *	JTAG_BUS => chipc
++ *	PCI_BUS => pci or pcie
++ *	PCMCIA_BUS => pcmcia
++ *	SDIO_BUS => pcmcia
++ *
++ * This routine exists so callers can disable initiator
++ * timeouts so accesses to very slow devices like otp
++ * won't cause an abort. The routine allows arbitrary
++ * settings of the service and request timeouts, though.
++ *
++ * Returns the timeout state before changing it or -1
++ * on error.
++ */
++
++#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
++
++uint32
++sb_set_initiator_to(sb_t *sbh, uint32 to)
++{
++	sb_info_t *si;
++	uint origidx, idx;
++	uint intr_val = 0;
++	uint32 tmp, ret = 0xffffffff;
++	sbconfig_t *sb;
++
++	si = SB_INFO(sbh);
++
++	if ((to & ~TO_MASK) != 0)
++		return ret;
++
++	/* Figure out the master core */
++	idx = BADIDX;
++	switch (BUSTYPE(si->sb.bustype)) {
++	case PCI_BUS:
++		idx = si->sb.buscoreidx; 
++		break;
++	case JTAG_BUS:
++		idx = SB_CC_IDX;
++		break;
++	case PCMCIA_BUS:
++	case SDIO_BUS:
++		idx = sb_findcoreidx(si, SB_PCMCIA, 0);
++		break;
++	case SB_BUS:
++		if ((idx = sb_findcoreidx(si, SB_MIPS33, 0)) == BADIDX)
++			idx = sb_findcoreidx(si, SB_MIPS, 0);
++		break;
++	default:
++		ASSERT(0);
++	}
++	if (idx == BADIDX)
++		return ret;
++
++	INTR_OFF(si, intr_val);
++	origidx = sb_coreidx(sbh);
++
++	sb = REGS2SB(sb_setcoreidx(sbh, idx));
++
++	tmp = R_SBREG(si, &sb->sbimconfiglow);
++	ret = tmp & TO_MASK;
++	W_SBREG(si, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
++
++	sb_commit(sbh);
++	sb_setcoreidx(sbh, origidx);
++	INTR_RESTORE(si, intr_val);
++	return ret;
++}
++
++void
++sb_core_disable(sb_t *sbh, uint32 bits)
++{
++	sb_info_t *si;
++	volatile uint32 dummy;
++	uint32 rej;
++	sbconfig_t *sb;
++
++	si = SB_INFO(sbh);
++
++	ASSERT(GOODREGS(si->curmap));
++	sb = REGS2SB(si->curmap);
++
++	/* if core is already in reset, just return */
++	if (R_SBREG(si, &sb->sbtmstatelow) & SBTML_RESET)
++		return;
++
++	/* reject value changed between sonics 2.2 and 2.3 */
++	if (si->sb.sonicsrev == SONICS_2_2)
++		rej = (1 << SBTML_REJ_SHIFT);
++	else
++		rej = (2 << SBTML_REJ_SHIFT);
++
++	/* if clocks are not enabled, put into reset and return */
++	if ((R_SBREG(si, &sb->sbtmstatelow) & SBTML_CLK) == 0)
++		goto disable;
++
++	/* set target reject and spin until busy is clear (preserve core-specific bits) */
++	OR_SBREG(si, &sb->sbtmstatelow, rej);
++	dummy = R_SBREG(si, &sb->sbtmstatelow);
++	OSL_DELAY(1);
++	SPINWAIT((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
++
++ 	if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT) {
++		OR_SBREG(si, &sb->sbimstate, SBIM_RJ);
++		dummy = R_SBREG(si, &sb->sbimstate);
++		OSL_DELAY(1);
++		SPINWAIT((R_SBREG(si, &sb->sbimstate) & SBIM_BY), 100000);
++	}
++
++	/* set reset and reject while enabling the clocks */
++	W_SBREG(si, &sb->sbtmstatelow, (bits | SBTML_FGC | SBTML_CLK | rej | SBTML_RESET));
++	dummy = R_SBREG(si, &sb->sbtmstatelow);
++	OSL_DELAY(10);
++
++	/* don't forget to clear the initiator reject bit */
++	if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT)
++		AND_SBREG(si, &sb->sbimstate, ~SBIM_RJ);
++
++disable:
++	/* leave reset and reject asserted */
++	W_SBREG(si, &sb->sbtmstatelow, (bits | rej | SBTML_RESET));
++	OSL_DELAY(1);
++}
++
++/* set chip watchdog reset timer to fire in 'ticks' backplane cycles */
++void
++sb_watchdog(sb_t *sbh, uint ticks)
++{
++	sb_info_t *si = SB_INFO(sbh);
++
++	/* instant NMI */
++	switch (si->gpioid) {
++	case SB_CC:
++		sb_corereg(si, 0, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
++		break;
++	case SB_EXTIF:
++		sb_corereg(si, si->gpioidx, OFFSETOF(extifregs_t, watchdog), ~0, ticks);
++		break;
++	}
++}
++
++/* initialize the pcmcia core */
++void
++sb_pcmcia_init(sb_t *sbh)
++{
++	sb_info_t *si;
++	uint8 cor;
++
++	si = SB_INFO(sbh);
++
++	/* enable d11 mac interrupts */
++	if (si->sb.chip == BCM4301_DEVICE_ID) {
++		/* Have to use FCR2 in 4301 */
++		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_FCR2 + PCMCIA_COR, &cor, 1);
++		cor |= COR_IRQEN | COR_FUNEN;
++		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_FCR2 + PCMCIA_COR, &cor, 1);
++	} else {
++		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
++		cor |= COR_IRQEN | COR_FUNEN;
++		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
++	}
++
++}
++
++
++/*
++ * Configure the pci core for pci client (NIC) action
++ * coremask is the bitvec of cores by index to be enabled.
++ */
++void
++sb_pci_setup(sb_t *sbh, uint coremask)
++{
++	sb_info_t *si;
++	sbconfig_t *sb;
++	sbpciregs_t *pciregs;
++	uint32 sbflag;
++	uint32 w;
++	uint idx;
++	int reg_val;
++
++	si = SB_INFO(sbh);
++
++	/* if not pci bus, we're done */
++	if (BUSTYPE(si->sb.bustype) != PCI_BUS)
++		return;
++
++	ASSERT(PCI(si) || PCIE(si));
++	ASSERT(si->sb.buscoreidx != BADIDX);
++
++	/* get current core index */
++	idx = si->curidx;
++
++	/* we interrupt on this backplane flag number */
++	ASSERT(GOODREGS(si->curmap));
++	sb = REGS2SB(si->curmap);
++	sbflag = R_SBREG(si, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
++
++	/* switch over to pci core */
++	pciregs = (sbpciregs_t*) sb_setcoreidx(sbh, si->sb.buscoreidx);
++	sb = REGS2SB(pciregs);
++
++	/*
++	 * Enable sb->pci interrupts.  Assume
++	 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
++	 */
++	if (PCIE(si) || (PCI(si) && ((si->sb.buscorerev) >= 6))) {
++		/* pci config write to set this core bit in PCIIntMask */
++		w = OSL_PCI_READ_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32));
++		w |= (coremask << PCI_SBIM_SHIFT);
++		OSL_PCI_WRITE_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32), w);
++	} else {
++		/* set sbintvec bit for our flag number */
++		OR_SBREG(si, &sb->sbintvec, (1 << sbflag));
++	}
++
++	if (PCI(si)) {
++		OR_REG(&pciregs->sbtopci2, (SBTOPCI_PREF|SBTOPCI_BURST));
++		if (si->sb.buscorerev >= 11)
++			OR_REG(&pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
++		if (si->sb.buscorerev < 5) {
++			SET_SBREG(si, &sb->sbimconfiglow, SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
++				(0x3 << SBIMCL_RTO_SHIFT) | 0x2);
++			sb_commit(sbh);
++		}
++	}
++
++	if (PCIE(si) && (si->sb.buscorerev == 0)) {
++		reg_val = sb_pcie_readreg((void *)sbh, (void *)PCIE_PCIEREGS, PCIE_TLP_WORKAROUNDSREG);
++		reg_val |= 0x8; 
++		sb_pcie_writereg((void *)sbh, (void *)PCIE_PCIEREGS, PCIE_TLP_WORKAROUNDSREG, reg_val);
++
++		reg_val = sb_pcie_readreg((void *)sbh, (void *)PCIE_PCIEREGS, PCIE_DLLP_LCREG);
++		reg_val &= ~(0x40);
++		sb_pcie_writereg(sbh, (void *)PCIE_PCIEREGS, PCIE_DLLP_LCREG, reg_val);
++
++		BCMINIT(sb_war30841)(si);
++	}
++
++	/* switch back to previous core */
++	sb_setcoreidx(sbh, idx);
++}
++
++uint32
++sb_base(uint32 admatch)
++{
++	uint32 base;
++	uint type;
++
++	type = admatch & SBAM_TYPE_MASK;
++	ASSERT(type < 3);
++
++	base = 0;
++
++	if (type == 0) {
++		base = admatch & SBAM_BASE0_MASK;
++	} else if (type == 1) {
++		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
++		base = admatch & SBAM_BASE1_MASK;
++	} else if (type == 2) {
++		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
++		base = admatch & SBAM_BASE2_MASK;
++	}
++
++	return (base);
++}
++
++uint32
++sb_size(uint32 admatch)
++{
++	uint32 size;
++	uint type;
++
++	type = admatch & SBAM_TYPE_MASK;
++	ASSERT(type < 3);
++
++	size = 0;
++
++	if (type == 0) {
++		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
++	} else if (type == 1) {
++		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
++		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
++	} else if (type == 2) {
++		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
++		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
++	}
++
++	return (size);
++}
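/*
 * Illustrative sketch (standalone C, not part of the patch): sb_base() and
 * sb_size() above decode an "admatch" word that packs a window base in the
 * high bits and log2(size) - 1 in the low bits.  The masks below are invented
 * for the example; the real layout is given by the SBAM_* constants.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_BASE_MASK	0xffff0000u	/* invented: upper bits hold the base */
#define EX_ADINT_MASK	0x0000001fu	/* invented: low bits hold log2(size) - 1 */

int main(void)
{
	uint32_t admatch = 0x18000004u;			/* example register value */
	uint32_t base = admatch & EX_BASE_MASK;		/* strip the size field */
	uint32_t size = 1u << ((admatch & EX_ADINT_MASK) + 1);

	printf("window base 0x%08x, size 0x%x bytes\n", (unsigned)base, (unsigned)size);
	return 0;
}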
++
++/* return the core-type instantiation # of the current core */
++uint
++sb_coreunit(sb_t *sbh)
++{
++	sb_info_t *si;
++	uint idx;
++	uint coreid;
++	uint coreunit;
++	uint i;
++
++	si = SB_INFO(sbh);
++	coreunit = 0;
++
++	idx = si->curidx;
++
++	ASSERT(GOODREGS(si->curmap));
++	coreid = sb_coreid(sbh);
++
++	/* count the cores of our type */
++	for (i = 0; i < idx; i++)
++		if (si->coreid[i] == coreid)
++			coreunit++;
++
++	return (coreunit);
++}
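/*
 * Illustrative sketch (standalone C, not part of the patch): sb_coreunit()
 * above derives a unit number by counting earlier cores with the same id, so
 * the second d11 core becomes unit 1, and so on.  The id values below are
 * example data only.
 */
#include <stdio.h>

static unsigned coreunit(const unsigned *ids, unsigned idx)
{
	unsigned unit = 0;

	/* count how many earlier slots carry the same core id */
	for (unsigned i = 0; i < idx; i++)
		if (ids[i] == ids[idx])
			unit++;
	return unit;
}

int main(void)
{
	/* pretend scan order: chipc, d11, pci, d11 -> the d11 at index 3 is unit 1 */
	const unsigned ids[] = { 0x800, 0x812, 0x804, 0x812 };

	printf("unit %u\n", coreunit(ids, 3));
	return 0;
}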
++
++static INLINE uint32
++factor6(uint32 x)
++{
++	switch (x) {
++	case CC_F6_2:	return 2;
++	case CC_F6_3:	return 3;
++	case CC_F6_4:	return 4;
++	case CC_F6_5:	return 5;
++	case CC_F6_6:	return 6;
++	case CC_F6_7:	return 7;
++	default:	return 0;
++	}
++}
++
++/* calculate the speed the SB would run at given a set of clockcontrol values */
++uint32
++sb_clock_rate(uint32 pll_type, uint32 n, uint32 m)
++{
++	uint32 n1, n2, clock, m1, m2, m3, mc;
++
++	n1 = n & CN_N1_MASK;
++	n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
++
++	if (pll_type == PLL_TYPE6) {
++		if (m & CC_T6_MMASK)
++			return CC_T6_M1;
++		else
++			return CC_T6_M0;
++	} else if ((pll_type == PLL_TYPE1) ||
++		   (pll_type == PLL_TYPE3) ||
++		   (pll_type == PLL_TYPE4) ||
++		   (pll_type == PLL_TYPE7)) {
++		n1 = factor6(n1);
++		n2 += CC_F5_BIAS;
++	} else if (pll_type == PLL_TYPE2) {
++		n1 += CC_T2_BIAS;
++		n2 += CC_T2_BIAS;
++		ASSERT((n1 >= 2) && (n1 <= 7));
++		ASSERT((n2 >= 5) && (n2 <= 23));
++	} else if (pll_type == PLL_TYPE5) {
++		return (100000000);
++	} else
++		ASSERT(0);
++	/* PLL types 3 and 7 use BASE2 (25Mhz) */
++	if ((pll_type == PLL_TYPE3) ||
++	    (pll_type == PLL_TYPE7)) { 
++		clock =  CC_CLOCK_BASE2 * n1 * n2;
++	}
++	else 
++		clock = CC_CLOCK_BASE1 * n1 * n2;
++
++	if (clock == 0)
++		return 0;
++
++	m1 = m & CC_M1_MASK;
++	m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
++	m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
++	mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
++
++	if ((pll_type == PLL_TYPE1) ||
++	    (pll_type == PLL_TYPE3) ||
++	    (pll_type == PLL_TYPE4) ||
++	    (pll_type == PLL_TYPE7)) {
++		m1 = factor6(m1);
++		if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
++			m2 += CC_F5_BIAS;
++		else
++			m2 = factor6(m2);
++		m3 = factor6(m3);
++
++		switch (mc) {
++		case CC_MC_BYPASS:	return (clock);
++		case CC_MC_M1:		return (clock / m1);
++		case CC_MC_M1M2:	return (clock / (m1 * m2));
++		case CC_MC_M1M2M3:	return (clock / (m1 * m2 * m3));
++		case CC_MC_M1M3:	return (clock / (m1 * m3));
++		default:		return (0);
++		}
++	} else {
++		ASSERT(pll_type == PLL_TYPE2);
++
++		m1 += CC_T2_BIAS;
++		m2 += CC_T2M2_BIAS;
++		m3 += CC_T2_BIAS;
++		ASSERT((m1 >= 2) && (m1 <= 7));
++		ASSERT((m2 >= 3) && (m2 <= 10));
++		ASSERT((m3 >= 2) && (m3 <= 7));
++
++		if ((mc & CC_T2MC_M1BYP) == 0)
++			clock /= m1;
++		if ((mc & CC_T2MC_M2BYP) == 0)
++			clock /= m2;
++		if ((mc & CC_T2MC_M3BYP) == 0)
++			clock /= m3;
++
++		return(clock);
++	}
++}
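/*
 * Illustrative sketch (standalone C, not part of the patch): the shape of the
 * clockcontrol math above -- multiply a reference frequency by the two N
 * dividers, then divide by whichever M dividers the mode bits leave enabled.
 * All numbers here are made up; the real factors come from the CC_* fields.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t example_rate(uint32_t base_hz, uint32_t n1, uint32_t n2,
			     uint32_t m1, uint32_t m2, int m2_bypassed)
{
	uint32_t clock = base_hz * n1 * n2;	/* intermediate (VCO-style) clock */

	clock /= m1;				/* first divider applies in this example */
	if (!m2_bypassed)
		clock /= m2;			/* second divider only if not bypassed */
	return clock;
}

int main(void)
{
	/* 25 MHz reference, n1=4, n2=2, m1=2, m2 bypassed -> 100 MHz */
	printf("%u Hz\n", (unsigned)example_rate(25000000u, 4, 2, 2, 5, 1));
	return 0;
}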
++
++/* returns the current speed the SB is running at */
++uint32
++sb_clock(sb_t *sbh)
++{
++	sb_info_t *si;
++	extifregs_t *eir;
++	chipcregs_t *cc;
++	uint32 n, m;
++	uint idx;
++	uint32 pll_type, rate;
++	uint intr_val = 0;
++
++	si = SB_INFO(sbh);
++	idx = si->curidx;
++	pll_type = PLL_TYPE1;
++
++	INTR_OFF(si, intr_val);
++
++	/* switch to extif or chipc core */
++	if ((eir = (extifregs_t *) sb_setcore(sbh, SB_EXTIF, 0))) {
++		n = R_REG(&eir->clockcontrol_n);
++		m = R_REG(&eir->clockcontrol_sb);
++	} else if ((cc = (chipcregs_t *) sb_setcore(sbh, SB_CC, 0))) {
++		pll_type = R_REG(&cc->capabilities) & CAP_PLL_MASK;
++		n = R_REG(&cc->clockcontrol_n);
++		if (pll_type == PLL_TYPE6)
++			m = R_REG(&cc->clockcontrol_mips);
++		else if (pll_type == PLL_TYPE3)
++		{
++			// Added by Chen-I for 5365 
++			if (BCMINIT(sb_chip)(sbh) == BCM5365_DEVICE_ID) 	
++				m = R_REG(&cc->clockcontrol_sb);
++			else
++				m = R_REG(&cc->clockcontrol_m2);
++		}
++		else
++			m = R_REG(&cc->clockcontrol_sb);
++	} else {
++		INTR_RESTORE(si, intr_val);
++		return 0;
++	}
++
++	// Added by Chen-I for 5365 
++	if (BCMINIT(sb_chip)(sbh) == BCM5365_DEVICE_ID)
++	{
++		rate = 100000000;
++	}
++	else
++	{	
++		/* calculate rate */
++		rate = sb_clock_rate(pll_type, n, m);
++		if (pll_type == PLL_TYPE3)
++			rate = rate / 2;
++	}
++
++	/* switch back to previous core */
++	sb_setcoreidx(sbh, idx);
++
++	INTR_RESTORE(si, intr_val);
++
++	return rate;
++}
++
++/* change logical "focus" to the gpio core for optimized access */
++void*
++sb_gpiosetcore(sb_t *sbh)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++
++	return (sb_setcoreidx(sbh, si->gpioidx));
++}
++
++/* mask&set gpiocontrol bits */
++uint32
++sb_gpiocontrol(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
++{
++	sb_info_t *si;
++	uint regoff;
++
++	si = SB_INFO(sbh);
++	regoff = 0;
++
++	priority = GPIO_DRV_PRIORITY; /* compatibility hack */
++
++	/* gpios could be shared on router platforms */
++	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
++		mask = priority ? (sb_gpioreservation & mask) :
++			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
++		val &= mask;
++	}
++
++	switch (si->gpioid) {
++	case SB_CC:
++		regoff = OFFSETOF(chipcregs_t, gpiocontrol);
++		break;
++
++	case SB_PCI:
++		regoff = OFFSETOF(sbpciregs_t, gpiocontrol);
++		break;
++
++	case SB_EXTIF:
++		return (0);
++	}
++
++	return (sb_corereg(si, si->gpioidx, regoff, mask, val));
++}
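/*
 * Illustrative sketch (standalone C, not part of the patch): the gpio helpers
 * above all funnel a "mask & set" update into sb_corereg().  The generic form
 * of such a read-modify-write is shown below; whether the value is also
 * masked is up to the particular helper, so treat this as the pattern only.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t mask_and_set(uint32_t reg, uint32_t mask, uint32_t val)
{
	/* clear the bits selected by mask, then merge in the new value */
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t gpiocontrol = 0x000000f0u;

	/* hand bits 2 and 3 to another function, leave the rest untouched */
	gpiocontrol = mask_and_set(gpiocontrol, 0x0000000cu, 0x0000000cu);
	printf("gpiocontrol = 0x%08x\n", (unsigned)gpiocontrol);	/* 0x000000fc */
	return 0;
}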
++
++/* mask&set gpio output enable bits */
++uint32
++sb_gpioouten(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
++{
++	sb_info_t *si;
++	uint regoff;
++
++	si = SB_INFO(sbh);
++	regoff = 0;
++
++	priority = GPIO_DRV_PRIORITY; /* compatibility hack */
++
++	/* gpios could be shared on router platforms */
++	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
++		mask = priority ? (sb_gpioreservation & mask) :
++			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
++		val &= mask;
++	}
++
++	switch (si->gpioid) {
++	case SB_CC:
++		regoff = OFFSETOF(chipcregs_t, gpioouten);
++		break;
++
++	case SB_PCI:
++		regoff = OFFSETOF(sbpciregs_t, gpioouten);
++		break;
++
++	case SB_EXTIF:
++		regoff = OFFSETOF(extifregs_t, gpio[0].outen);
++		break;
++	}
++
++	return (sb_corereg(si, si->gpioidx, regoff, mask, val));
++}
++
++/* mask&set gpio output bits */
++uint32
++sb_gpioout(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
++{
++	sb_info_t *si;
++	uint regoff;
++
++	si = SB_INFO(sbh);
++	regoff = 0;
++
++	priority = GPIO_DRV_PRIORITY; /* compatibility hack */
++
++	/* gpios could be shared on router platforms */
++	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
++		mask = priority ? (sb_gpioreservation & mask) :
++			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
++		val &= mask;
++	}
++
++	switch (si->gpioid) {
++	case SB_CC:
++		regoff = OFFSETOF(chipcregs_t, gpioout);
++		break;
++
++	case SB_PCI:
++		regoff = OFFSETOF(sbpciregs_t, gpioout);
++		break;
++
++	case SB_EXTIF:
++		regoff = OFFSETOF(extifregs_t, gpio[0].out);
++		break;
++	}
++
++	return (sb_corereg(si, si->gpioidx, regoff, mask, val));
++}
 +
-+void testcrc32(void)
++/* reserve one gpio */
++uint32
++sb_gpioreserve(sb_t *sbh, uint32 gpio_bitmask, uint8 priority)
 +{
-+	uint j,k,l;
-+	uint8 *buf;
-+	uint len[CNBUFS];
-+	uint32 crcr;
-+	uint32 crc32tv[CNBUFS] =
-+		{0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
++	sb_info_t *si;
 +
-+	ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
++	si = SB_INFO(sbh);
 +
-+	/* step through all possible alignments */
-+	for (l=0;l<=4;l++) {
-+		for (j=0; j<CNBUFS; j++) {
-+			len[j] = CLEN;
-+			for (k=0; k<len[j]; k++)
-+				*(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
-+		}
++	priority = GPIO_DRV_PRIORITY; /* compatibility hack */
 +
-+		for (j=0; j<CNBUFS; j++) {
-+			crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
-+			ASSERT(crcr == crc32tv[j]);
-+		}
++	/* only cores on SB_BUS share GPIOs, and only application users need to reserve/release a GPIO */
++	if ( (BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority))  {
++		ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
++		return -1;
++	}
++	/* make sure only one bit is set */
++	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
++		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
++		return -1;
 +	}
 +
-+	MFREE(buf, CBUFSIZ*CNBUFS);
-+	return;
-+}
-+#endif
++	/* already reserved */
++	if (sb_gpioreservation & gpio_bitmask)
++		return -1;
++	/* set reservation */
++	sb_gpioreservation |= gpio_bitmask;
 +
++	return sb_gpioreservation;
++}
 +
++/* release one gpio */
 +/* 
-+ * Advance from the current 1-byte tag/1-byte length/variable-length value 
-+ * triple, to the next, returning a pointer to the next.
-+ * If the current or next TLV is invalid (does not fit in given buffer length),
-+ * NULL is returned.
-+ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
-+ * by the TLV paramter's length if it is valid.
-+ */
-+bcm_tlv_t *
-+bcm_next_tlv(bcm_tlv_t *elt, int *buflen)
++ * releasing the gpio doesn't change the current value on the GPIO; the last
++ * written value persists until someone overwrites it
++*/
++
++uint32
++sb_gpiorelease(sb_t *sbh, uint32 gpio_bitmask, uint8 priority)
 +{
-+	int len;
++	sb_info_t *si;
 +
-+	/* validate current elt */
-+	if (!bcm_valid_tlv(elt, *buflen))
-+		return NULL;
-+	
-+	/* advance to next elt */
-+	len = elt->len;
-+	elt = (bcm_tlv_t*)(elt->data + len);
-+	*buflen -= (2 + len);
-+	
-+	/* validate next elt */
-+	if (!bcm_valid_tlv(elt, *buflen))
-+		return NULL;
++	si = SB_INFO(sbh);
++
++	priority = GPIO_DRV_PRIORITY; /* compatibility hack */
++
++	/* only cores on SB_BUS share GPIOs, and only application users need to reserve/release a GPIO */
++	if ( (BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority))  {
++		ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
++		return -1;
++	}
++	/* make sure only one bit is set */
++	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
++		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
++		return -1;
++	}
 +	
-+	return elt;
++	/* already released */
++	if (!(sb_gpioreservation & gpio_bitmask))
++		return -1;
++
++	/* clear reservation */
++	sb_gpioreservation &= ~gpio_bitmask;
++
++	return sb_gpioreservation;
 +}
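/*
 * Illustrative sketch (standalone C, not part of the patch): the reserve/
 * release pair above is a plain bitmask registry, and (x & (x - 1)) is the
 * usual "exactly one bit set" test.  Names below are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ex_reservation;		/* one bit per reserved GPIO */

static int ex_gpio_reserve(uint32_t bit)
{
	if (!bit || (bit & (bit - 1)))	/* must be exactly one bit */
		return -1;
	if (ex_reservation & bit)	/* already taken */
		return -1;
	ex_reservation |= bit;
	return 0;
}

static int ex_gpio_release(uint32_t bit)
{
	if (!bit || (bit & (bit - 1)))
		return -1;
	if (!(ex_reservation & bit))	/* not currently reserved */
		return -1;
	ex_reservation &= ~bit;
	return 0;
}

int main(void)
{
	printf("reserve GPIO3: %d\n", ex_gpio_reserve(1u << 3));	/* 0  */
	printf("reserve again: %d\n", ex_gpio_reserve(1u << 3));	/* -1 */
	printf("release GPIO3: %d\n", ex_gpio_release(1u << 3));	/* 0  */
	return 0;
}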
 +
-+/* 
-+ * Traverse a string of 1-byte tag/1-byte length/variable-length value 
-+ * triples, returning a pointer to the substring whose first element 
-+ * matches tag
-+ */
-+bcm_tlv_t *
-+bcm_parse_tlvs(void *buf, int buflen, uint key)
++/* return the current gpioin register value */
++uint32
++sb_gpioin(sb_t *sbh)
 +{
-+	bcm_tlv_t *elt;
-+	int totlen;
++	sb_info_t *si;
++	uint regoff;
 +
-+	elt = (bcm_tlv_t*)buf;
-+	totlen = buflen;
++	si = SB_INFO(sbh);
++	regoff = 0;
 +
-+	/* find tagged parameter */
-+	while (totlen >= 2) {
-+		int len = elt->len;
++	switch (si->gpioid) {
++	case SB_CC:
++		regoff = OFFSETOF(chipcregs_t, gpioin);
++		break;
 +
-+		/* validate remaining totlen */
-+		if ((elt->id == key) && (totlen >= (len + 2)))
-+			return (elt);
++	case SB_PCI:
++		regoff = OFFSETOF(sbpciregs_t, gpioin);
++		break;
 +
-+		elt = (bcm_tlv_t*)((uint8*)elt + (len + 2));
-+		totlen -= (len + 2);
++	case SB_EXTIF:
++		regoff = OFFSETOF(extifregs_t, gpioin);
++		break;
 +	}
-+	
-+	return NULL;
++
++	return (sb_corereg(si, si->gpioidx, regoff, 0, 0));
 +}
 +
-+/* 
-+ * Traverse a string of 1-byte tag/1-byte length/variable-length value 
-+ * triples, returning a pointer to the substring whose first element 
-+ * matches tag.  Stop parsing when we see an element whose ID is greater
-+ * than the target key. 
-+ */
-+bcm_tlv_t *
-+bcm_parse_ordered_tlvs(void *buf, int buflen, uint key)
++/* mask&set gpio interrupt polarity bits */
++uint32
++sb_gpiointpolarity(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
 +{
-+	bcm_tlv_t *elt;
-+	int totlen;
++	sb_info_t *si;
++	uint regoff;
 +
-+	elt = (bcm_tlv_t*)buf;
-+	totlen = buflen;
++	si = SB_INFO(sbh);
++	regoff = 0;
 +
-+	/* find tagged parameter */
-+	while (totlen >= 2) {
-+		uint id = elt->id;
-+		int len = elt->len;
-+		
-+		/* Punt if we start seeing IDs > than target key */
-+		if (id > key)
-+			return(NULL);
++	priority = GPIO_DRV_PRIORITY; /* compatibility hack */
 +
-+		/* validate remaining totlen */
-+		if ((id == key) && (totlen >= (len + 2)))
-+			return (elt);
++	/* gpios could be shared on router platforms */
++	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
++		mask = priority ? (sb_gpioreservation & mask) :
++			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
++		val &= mask;
++	}
 +
-+		elt = (bcm_tlv_t*)((uint8*)elt + (len + 2));
-+		totlen -= (len + 2);
++	switch (si->gpioid) {
++	case SB_CC:
++		regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
++		break;
++
++	case SB_PCI:
++		/* pci gpio implementation does not support interrupt polarity */
++		ASSERT(0);
++		break;
++
++	case SB_EXTIF:
++		regoff = OFFSETOF(extifregs_t, gpiointpolarity);
++		break;
 +	}
-+	return NULL;
++
++	return (sb_corereg(si, si->gpioidx, regoff, mask, val));
 +}
-+/* routine to dump fields in a fileddesc structure */
 +
-+uint 
-+bcmdumpfields(readreg_rtn read_rtn, void *arg0, void *arg1, struct fielddesc *fielddesc_array, char *buf, uint32 bufsize)
++/* mask&set gpio interrupt mask bits */
++uint32
++sb_gpiointmask(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
 +{
-+	uint  filled_len;
-+	uint len;
-+	struct fielddesc *cur_ptr;
++	sb_info_t *si;
++	uint regoff;
 +
-+	filled_len = 0;
-+	cur_ptr = fielddesc_array; 
++	si = SB_INFO(sbh);
++	regoff = 0;
 +
-+	while (bufsize > (filled_len + 64)) {
-+		if (cur_ptr->nameandfmt == NULL)
-+			break;
-+		len = sprintf(buf, cur_ptr->nameandfmt, read_rtn(arg0, arg1, cur_ptr->offset));
-+		buf += len;
-+		filled_len += len;
-+		cur_ptr++;
++	priority = GPIO_DRV_PRIORITY; /* compatibility hack */
++
++	/* gpios could be shared on router platforms */
++	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
++		mask = priority ? (sb_gpioreservation & mask) :
++			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
++		val &= mask;
 +	}
-+	return filled_len;
++
++	switch (si->gpioid) {
++	case SB_CC:
++		regoff = OFFSETOF(chipcregs_t, gpiointmask);
++		break;
++
++	case SB_PCI:
++		/* pci gpio implementation does not support interrupt mask */
++		ASSERT(0);
++		break;
++
++	case SB_EXTIF:
++		regoff = OFFSETOF(extifregs_t, gpiointmask);
++		break;
++	}
++
++	return (sb_corereg(si, si->gpioidx, regoff, mask, val));
++}
++
++/* assign the gpio to an led */
++uint32
++sb_gpioled(sb_t *sbh, uint32 mask, uint32 val)
++{
++	sb_info_t *si;
++
++	si = SB_INFO(sbh);
++	if (si->sb.ccrev < 16)
++		return -1;
++
++	/* gpio led powersave reg */
++	return(sb_corereg(si, 0, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
 +}
 +
-+uint
-+bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
++/* mask&set gpio timer val */
++uint32 
++sb_gpiotimerval(sb_t *sbh, uint32 mask, uint32 gpiotimerval)
 +{
-+	uint len;
-+
-+	len = strlen(name) + 1;
-+	
-+	if ((len + datalen) > buflen)
-+		return 0;
-+
-+	strcpy(buf, name);
++	sb_info_t *si;
++	si = SB_INFO(sbh);
 +
-+	/* append data onto the end of the name string */
-+	memcpy(&buf[len], data, datalen);
-+	len += datalen;
++	if (si->sb.ccrev < 16)
++		return -1;
 +
-+	return len;
++	return(sb_corereg(si, 0, OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
 +}
 +
-+/* Quarter dBm units to mW
-+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
-+ * Table is offset so the last entry is largest mW value that fits in
-+ * a uint16.
-+ */
-+
-+#define QDBM_OFFSET 153
-+#define QDBM_TABLE_LEN 40
 +
-+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
-+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
-+ */
-+#define QDBM_TABLE_LOW_BOUND 6493
++/* return the slow clock source - LPO, XTAL, or PCI */
++static uint
++sb_slowclk_src(sb_info_t *si)
++{
++	chipcregs_t *cc;
 +
-+/* Largest mW value that will round down to the last table entry,
-+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
-+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
-+ */
-+#define QDBM_TABLE_HIGH_BOUND 64938
 +
-+static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
-+/* qdBm:        +0		+1		+2		+3		+4		+5		+6		+7	*/
-+/* 153: */      6683,	7079,	7499,	7943,	8414,	8913,	9441,	10000,
-+/* 161: */      10593,	11220,	11885,	12589,	13335,	14125,	14962,	15849,
-+/* 169: */      16788,	17783,	18836,	19953,	21135,	22387,	23714,	25119,
-+/* 177: */      26607,	28184,	29854,	31623,	33497,	35481,	37584,	39811,
-+/* 185: */      42170,	44668,	47315,	50119,	53088,	56234,	59566,	63096
-+};
++	ASSERT(sb_coreid(&si->sb) == SB_CC);
 +
-+uint16
-+bcm_qdbm_to_mw(uint8 qdbm)
-+{
-+	uint factor = 1;
-+	int idx = qdbm - QDBM_OFFSET;
-+	
-+	if (idx > QDBM_TABLE_LEN) {
-+		/* clamp to max uint16 mW value */
-+		return 0xFFFF;
-+	}
-+	
-+	/* scale the qdBm index up to the range of the table 0-40
-+	 * where an offset of 40 qdBm equals a factor of 10 mW.
-+	 */
-+	while (idx < 0) {
-+		idx += 40;
-+		factor *= 10;
-+	}
-+	
-+	/* return the mW value scaled down to the correct factor of 10,
-+	 * adding in factor/2 to get proper rounding. */
-+	return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
++	if (si->sb.ccrev < 6) {
++		if ((BUSTYPE(si->sb.bustype) == PCI_BUS)
++			&& (OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32)) & PCI_CFG_GPIO_SCS))
++			return (SCC_SS_PCI);
++		else
++			return (SCC_SS_XTAL);
++	} else if (si->sb.ccrev < 10) {
++		cc = (chipcregs_t*) sb_setcoreidx(&si->sb, si->curidx);
++		return (R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK);
++	} else	/* Insta-clock */
++		return (SCC_SS_XTAL);
 +}
 +
-+uint8
-+bcm_mw_to_qdbm(uint16 mw)
++/* return the ILP (slowclock) min or max frequency */
++static uint
++sb_slowclk_freq(sb_info_t *si, bool max)
 +{
-+	uint8 qdbm;
-+	int offset;
-+	uint mw_uint = mw;
-+	uint boundary;
-+	
-+	/* handle boundary case */
-+	if (mw_uint <= 1)
-+		return 0;
-+	
-+	offset = QDBM_OFFSET;
-+	
-+	/* move mw into the range of the table */
-+	while (mw_uint < QDBM_TABLE_LOW_BOUND) {
-+		mw_uint *= 10;
-+		offset -= 40;
-+	}
++	chipcregs_t *cc;
++	uint32 slowclk;
++	uint div;
 +
-+	for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
-+		boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] - nqdBm_to_mW_map[qdbm])/2;
-+		if (mw_uint < boundary) break;
-+	}
 +
-+	qdbm += (uint8)offset;
++	ASSERT(sb_coreid(&si->sb) == SB_CC);
 +
-+	return(qdbm);
-+}
-diff -Nur linux-2.4.32/drivers/net/hnd/hnddma.c linux-2.4.32-brcm/drivers/net/hnd/hnddma.c
---- linux-2.4.32/drivers/net/hnd/hnddma.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/net/hnd/hnddma.c	2005-12-16 23:39:11.288858250 +0100
-@@ -0,0 +1,1527 @@
-+/*
-+ * Generic Broadcom Home Networking Division (HND) DMA module.
-+ * This supports the following chips: BCM42xx, 44xx, 47xx .
-+ *
-+ * Copyright 2005, Broadcom Corporation
-+ * All Rights Reserved.
-+ * 
-+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
-+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
-+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
-+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
-+ *
-+ * $Id$
-+ */
++	cc = (chipcregs_t*) sb_setcoreidx(&si->sb, si->curidx);
 +
-+#include <typedefs.h>
-+#include <osl.h>
-+#include <bcmendian.h>
-+#include <sbconfig.h>
-+#include <bcmutils.h>
-+#include <bcmdevs.h>
-+#include <sbutils.h>
++	/* shouldn't be here unless we've established the chip has dynamic clk control */
++	ASSERT(R_REG(&cc->capabilities) & CAP_PWR_CTL);
 +
-+struct dma_info;	/* forward declaration */
-+#define di_t struct dma_info
++	slowclk = sb_slowclk_src(si);
++	if (si->sb.ccrev < 6) {
++		if (slowclk == SCC_SS_PCI)
++			return (max? (PCIMAXFREQ/64) : (PCIMINFREQ/64));
++		else
++			return (max? (XTALMAXFREQ/32) : (XTALMINFREQ/32));
++	} else if (si->sb.ccrev < 10) {
++		div = 4 * (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
++		if (slowclk == SCC_SS_LPO)
++			return (max? LPOMAXFREQ : LPOMINFREQ);
++		else if (slowclk == SCC_SS_XTAL)
++			return (max? (XTALMAXFREQ/div) : (XTALMINFREQ/div));
++		else if (slowclk == SCC_SS_PCI)
++			return (max? (PCIMAXFREQ/div) : (PCIMINFREQ/div));
++		else
++			ASSERT(0);
++	} else {
++		/* Chipc rev 10 is InstaClock */
++		div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
++		div = 4 * (div + 1);
++		return (max ? XTALMAXFREQ : (XTALMINFREQ/div));
++	}
++	return (0);
++}
 +
-+#include <sbhnddma.h>
-+#include <hnddma.h>
++static void
++sb_clkctl_setdelay(sb_info_t *si, void *chipcregs)
++{
++	chipcregs_t * cc;
++	uint slowmaxfreq, pll_delay, slowclk;
++	uint pll_on_delay, fref_sel_delay;
 +
-+/* debug/trace */
-+#define	DMA_ERROR(args)
-+#define	DMA_TRACE(args)
++	pll_delay = PLL_DELAY;
 +
-+/* default dma message level (if input msg_level pointer is null in dma_attach()) */
-+static uint dma_msg_level =
-+	0;
++	/* If the slow clock is not sourced by the xtal then add the xtal_on_delay
++	 * since the xtal will also be powered down by dynamic clk control logic.
++	 */
++	slowclk = sb_slowclk_src(si);
++	if (slowclk != SCC_SS_XTAL)
++		pll_delay += XTAL_ON_DELAY;
 +
-+#define	MAXNAMEL	8
++	/* Starting with 4318 it is ILP that is used for the delays */
++	slowmaxfreq = sb_slowclk_freq(si, (si->sb.ccrev >= 10) ? FALSE : TRUE);
 +
-+/* dma engine software state */
-+typedef struct dma_info {
-+	hnddma_t	hnddma;		/* exported structure */
-+	uint		*msg_level;	/* message level pointer */
-+	char		name[MAXNAMEL];	/* callers name for diag msgs */
-+	
-+	void		*osh;		/* os handle */
-+	sb_t		*sbh;		/* sb handle */
-+	
-+	bool		dma64;		/* dma64 enabled */
-+	bool		addrext;	/* this dma engine supports DmaExtendedAddrChanges */
-+	
-+	dma32regs_t	*d32txregs;	/* 32 bits dma tx engine registers */
-+	dma32regs_t	*d32rxregs;	/* 32 bits dma rx engine registers */
-+	dma64regs_t	*d64txregs;	/* 64 bits dma tx engine registers */
-+	dma64regs_t	*d64rxregs;	/* 64 bits dma rx engine registers */
++	pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
++	fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
 +
-+	uint32		dma64align;	/* either 8k or 4k depends on number of dd */
-+	dma32dd_t	*txd32;		/* pointer to dma32 tx descriptor ring */
-+	dma64dd_t	*txd64;		/* pointer to dma64 tx descriptor ring */
-+	uint		ntxd;		/* # tx descriptors tunable */	
-+	uint		txin;		/* index of next descriptor to reclaim */
-+	uint		txout;		/* index of next descriptor to post */
-+	uint		txavail;	/* # free tx descriptors */
-+	void		**txp;		/* pointer to parallel array of pointers to packets */
-+	ulong		txdpa;		/* physical address of descriptor ring */
-+	uint		txdalign;	/* #bytes added to alloc'd mem to align txd */
-+	uint		txdalloc;	/* #bytes allocated for the ring */
++	cc = (chipcregs_t *)chipcregs;
++	W_REG(&cc->pll_on_delay, pll_on_delay);
++	W_REG(&cc->fref_sel_delay, fref_sel_delay);
++}
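/*
 * Illustrative sketch (standalone C, not part of the patch): the delay
 * registers above are programmed in slow-clock ticks, so a microsecond delay
 * is converted with a round-up division -- the "+ 999999" implements
 * ceil(delay_us * slowclk_hz / 1000000).  The helper name is invented.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t us_to_slowclk_ticks(uint32_t delay_us, uint32_t slowclk_hz)
{
	/* round up so the programmed delay is never shorter than requested */
	return (uint32_t)(((uint64_t)slowclk_hz * delay_us + 999999u) / 1000000u);
}

int main(void)
{
	/* e.g. 150 us of PLL settling time at a 32768 Hz slow clock -> 5 ticks */
	printf("%u ticks\n", (unsigned)us_to_slowclk_ticks(150, 32768));
	return 0;
}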
 +
-+	dma32dd_t	*rxd32;		/* pointer to dma32 rx descriptor ring */
-+	dma64dd_t	*rxd64;		/* pointer to dma64 rx descriptor ring */
-+	uint		nrxd;		/* # rx descriptors tunable */	
-+	uint		rxin;		/* index of next descriptor to reclaim */
-+	uint		rxout;		/* index of next descriptor to post */
-+	void		**rxp;		/* pointer to parallel array of pointers to packets */
-+	ulong		rxdpa;		/* physical address of descriptor ring */
-+	uint		rxdalign;	/* #bytes added to alloc'd mem to align rxd */
-+	uint		rxdalloc;	/* #bytes allocated for the ring */
++int
++sb_pwrctl_slowclk(void *sbh, bool set, uint *div)
++{
++	sb_info_t *si;
++	uint origidx;
++	chipcregs_t *cc;
++	uint intr_val = 0;
++	uint err = 0;
++	
++	si = SB_INFO(sbh);
 +
-+	/* tunables */
-+	uint		rxbufsize;	/* rx buffer size in bytes */
-+	uint		nrxpost;	/* # rx buffers to keep posted */
-+	uint		rxoffset;	/* rxcontrol offset */
-+	uint		ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
-+	uint		ddoffsethigh;	/* add to get dma address of descriptor ring, high 32 bits */
-+	uint		dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
-+	uint		dataoffsethigh;	/* add to get dma address of data buffer, high 32 bits */
-+} dma_info_t;
++	/* chipcommon cores prior to rev6 don't support slowclkcontrol */
++	if (si->sb.ccrev < 6)
++		return 1;
 +
-+#ifdef BCMDMA64
-+#define	DMA64_ENAB(di)	((di)->dma64)
-+#else
-+#define	DMA64_ENAB(di)	(0)
-+#endif
++	/* chipcommon cores rev10 are a whole new ball game */
++	if (si->sb.ccrev >= 10)
++		return 1;
 +
-+/* descriptor bumping macros */
-+#define	XXD(x, n)	((x) & ((n) - 1))
-+#define	TXD(x)		XXD((x), di->ntxd)
-+#define	RXD(x)		XXD((x), di->nrxd)
-+#define	NEXTTXD(i)	TXD(i + 1)
-+#define	PREVTXD(i)	TXD(i - 1)
-+#define	NEXTRXD(i)	RXD(i + 1)
-+#define	NTXDACTIVE(h, t)	TXD(t - h)
-+#define	NRXDACTIVE(h, t)	RXD(t - h)
++	if (set && ((*div % 4) || (*div < 4)))
++		return 2;
++	
++	INTR_OFF(si, intr_val);
++	origidx = si->curidx;
++	cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0);
++	ASSERT(cc != NULL);
++	
++	if (!(R_REG(&cc->capabilities) & CAP_PWR_CTL)) {
++		err = 3;
++		goto done;
++	}
 +
-+/* macros to convert between byte offsets and indexes */
-+#define	B2I(bytes, type)	((bytes) / sizeof(type))
-+#define	I2B(index, type)	((index) * sizeof(type))
++	if (set) {
++		SET_REG(&cc->slow_clk_ctl, SCC_CD_MASK, ((*div / 4 - 1) << SCC_CD_SHIFT));
++		sb_clkctl_setdelay(sbh, (void *)cc);
++	} else
++		*div = 4 * (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
 +
-+#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
-+#define	PCI32ADDR_HIGH_SHIFT	30
++done:
++	sb_setcoreidx(sbh, origidx);
++	INTR_RESTORE(si, intr_val);
++	return err;
++}
 +
++/* initialize power control delay registers */
++void sb_clkctl_init(sb_t *sbh)
++{
++	sb_info_t *si;
++	uint origidx;
++	chipcregs_t *cc;
 +
-+/* prototypes */
-+static bool dma_isaddrext(dma_info_t *di);
-+static bool dma_alloc(dma_info_t *di, uint direction);
++	si = SB_INFO(sbh);
 +
-+static bool dma32_alloc(dma_info_t *di, uint direction);
-+static void dma32_txreset(dma_info_t *di);
-+static void dma32_rxreset(dma_info_t *di);
-+static bool dma32_txsuspendedidle(dma_info_t *di);
-+static int  dma32_txfast(dma_info_t *di, void *p0, uint32 coreflags);
-+static void* dma32_getnexttxp(dma_info_t *di, bool forceall);
-+static void* dma32_getnextrxp(dma_info_t *di, bool forceall);
-+static void dma32_txrotate(di_t *di);
++	origidx = si->curidx;
 +
-+/* prototype or stubs */
-+#ifdef BCMDMA64
-+static bool dma64_alloc(dma_info_t *di, uint direction);
-+static void dma64_txreset(dma_info_t *di);
-+static void dma64_rxreset(dma_info_t *di);
-+static bool dma64_txsuspendedidle(dma_info_t *di);
-+static int  dma64_txfast(dma_info_t *di, void *p0, uint32 coreflags);
-+static void* dma64_getnexttxp(dma_info_t *di, bool forceall);
-+static void* dma64_getnextrxp(dma_info_t *di, bool forceall);
-+static void dma64_txrotate(di_t *di);
-+#else
-+static bool dma64_alloc(dma_info_t *di, uint direction) { return TRUE; }
-+static void dma64_txreset(dma_info_t *di) {}
-+static void dma64_rxreset(dma_info_t *di) {}
-+static bool dma64_txsuspendedidle(dma_info_t *di) { return TRUE;}
-+static int  dma64_txfast(dma_info_t *di, void *p0, uint32 coreflags) { return 0; }
-+static void* dma64_getnexttxp(dma_info_t *di, bool forceall) { return NULL; }
-+static void* dma64_getnextrxp(dma_info_t *di, bool forceall) { return NULL; }
-+static void dma64_txrotate(di_t *di) { return; }
-+#endif
++	if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
++		return;
 +
-+/* old dmaregs struct for compatibility */
-+typedef volatile struct {
-+	/* transmit channel */
-+	uint32  xmtcontrol;         /* enable, et al */
-+	uint32  xmtaddr;            /* descriptor ring base address (4K aligned) */
-+	uint32  xmtptr;             /* last descriptor posted to chip */
-+	uint32  xmtstatus;          /* current active descriptor, et al */
-+	
-+	/* receive channel */
-+	uint32  rcvcontrol;         /* enable, et al */
-+	uint32  rcvaddr;            /* descriptor ring base address (4K aligned) */
-+	uint32  rcvptr;             /* last descriptor posted to chip */
-+	uint32  rcvstatus;          /* current active descriptor, et al */
-+} dmaregs_t;
++	if (!(R_REG(&cc->capabilities) & CAP_PWR_CTL))
++		goto done;
 +
-+typedef struct {
-+	uint ddoffset;
-+	uint dataoffset;
-+} compat_data;
++	/* 4317pc does not work with SlowClock less than 5 MHz */
++	if ((BUSTYPE(si->sb.bustype) == PCMCIA_BUS) && (si->sb.ccrev >= 6) && (si->sb.ccrev < 10))
++		SET_REG(&cc->slow_clk_ctl, SCC_CD_MASK, (ILP_DIV_5MHZ << SCC_CD_SHIFT));
 +
-+static compat_data *ugly_hack = NULL;
++	/* set all Instaclk chip ILP to 1 MHz */
++	else if (si->sb.ccrev >= 10)
++		SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK, (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
++	
++	sb_clkctl_setdelay(si, (void *)cc);
 +
-+void* 
-+dma_attold(void *drv, void *osh, char *name, dmaregs_t *regs, uint ntxd, uint nrxd,
-+		uint rxbufsize, uint nrxpost, uint rxoffset, uint ddoffset, uint dataoffset, uint *msg_level)
++done:
++	sb_setcoreidx(sbh, origidx);
++}
++void sb_pwrctl_init(sb_t *sbh)
 +{
-+	dma32regs_t *dtx = regs;
-+	dma32regs_t *drx = dtx + 1;
-+	
-+	ugly_hack = kmalloc(sizeof(ugly_hack), GFP_KERNEL);
-+	ugly_hack->ddoffset = ddoffset;
-+	ugly_hack->dataoffset = dataoffset;
-+	dma_attach((osl_t *) osh, name, NULL, dtx, drx, ntxd, nrxd, rxbufsize, nrxpost, rxoffset, msg_level);
-+	ugly_hack = NULL;
++	sb_clkctl_init(sbh);
 +}
++/* return the value suitable for writing to the dot11 core FAST_PWRUP_DELAY register */
++uint16
++sb_clkctl_fast_pwrup_delay(sb_t *sbh)
++{
++	sb_info_t *si;
++	uint origidx;
++	chipcregs_t *cc;
++	uint slowminfreq;
++	uint16 fpdelay;
++	uint intr_val = 0;
 +
++	si = SB_INFO(sbh);
++	fpdelay = 0;
++	origidx = si->curidx;
 +
-+void* 
-+dma_attach(osl_t *osh, char *name, sb_t *sbh, void *dmaregstx, void *dmaregsrx,
-+	   uint ntxd, uint nrxd, uint rxbufsize, uint nrxpost, uint rxoffset, uint *msg_level)
-+{
-+	dma_info_t *di;
-+	uint size;
++	INTR_OFF(si, intr_val);
 +
-+	/* allocate private info structure */
-+	if ((di = MALLOC(osh, sizeof (dma_info_t))) == NULL) {
-+		return (NULL);
-+	}
-+	bzero((char*)di, sizeof (dma_info_t));
++	if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
++		goto done;
 +
-+	di->msg_level = msg_level ? msg_level : &dma_msg_level;
++	if (!(R_REG(&cc->capabilities) & CAP_PWR_CTL))
++		goto done;
 +
-+	if (sbh != NULL)
-+		di->dma64 = ((sb_coreflagshi(sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64);
++	slowminfreq = sb_slowclk_freq(si, FALSE);
++	fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) + (slowminfreq - 1)) / slowminfreq;
 +
-+#ifndef BCMDMA64
-+	if (di->dma64) {
-+		DMA_ERROR(("dma_attach: driver doesn't have the capability to support 64 bits DMA\n"));
-+		goto fail;
-+	}
-+#endif
-+	
-+	/* check arguments */
-+	ASSERT(ISPOWEROF2(ntxd));
-+	ASSERT(ISPOWEROF2(nrxd));
-+	if (nrxd == 0)
-+		ASSERT(dmaregsrx == NULL);
-+	if (ntxd == 0)
-+		ASSERT(dmaregstx == NULL);
++done:
++	sb_setcoreidx(sbh, origidx);
++	INTR_RESTORE(si, intr_val);
++	return (fpdelay);
++}
++uint16 sb_pwrctl_fast_pwrup_delay(sb_t *sbh)
++{
++	return sb_clkctl_fast_pwrup_delay(sbh);
++}
++/* turn primary xtal and/or pll off/on */
++int
++sb_clkctl_xtal(sb_t *sbh, uint what, bool on)
++{
++	sb_info_t *si;
++	uint32 in, out, outen;
 +
++	si = SB_INFO(sbh);
 +
-+	/* init dma reg pointer */
-+	if (di->dma64) {
-+		ASSERT(ntxd <= D64MAXDD);
-+		ASSERT(nrxd <= D64MAXDD);
-+		di->d64txregs = (dma64regs_t *)dmaregstx;
-+		di->d64rxregs = (dma64regs_t *)dmaregsrx;
++	switch (BUSTYPE(si->sb.bustype)) {
 +
-+		di->dma64align = D64RINGALIGN;
-+		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
-+			/* for smaller dd table, HW relax the alignment requirement */
-+			di->dma64align = D64RINGALIGN / 2;
-+		}
-+	} else {
-+		ASSERT(ntxd <= D32MAXDD);
-+		ASSERT(nrxd <= D32MAXDD);
-+		di->d32txregs = (dma32regs_t *)dmaregstx;
-+		di->d32rxregs = (dma32regs_t *)dmaregsrx;
-+	}
 +
++		case PCMCIA_BUS:
++			return (0);
 +
-+	/* make a private copy of our callers name */
-+	strncpy(di->name, name, MAXNAMEL);
-+	di->name[MAXNAMEL-1] = '\0';
 +
-+	di->osh = osh;
-+	di->sbh = sbh;
++		case PCI_BUS:
 +
-+	/* save tunables */
-+	di->ntxd = ntxd;
-+	di->nrxd = nrxd;
-+	di->rxbufsize = rxbufsize;
-+	di->nrxpost = nrxpost;
-+	di->rxoffset = rxoffset;
++			/* pcie core doesn't have any mapping to control the xtal pu */
++			if (PCIE(si))
++				return -1;
 +
-+	/* 
-+	 * figure out the DMA physical address offset for dd and data 
-+	 *   for old chips w/o sb, use zero
-+	 *   for new chips w sb, 
-+	 *     PCI/PCIE: they map silicon backplace address to zero based memory, need offset
-+	 *     Other bus: use zero
-+	 *     SB_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
-+	 */
-+	di->ddoffsetlow = 0;
-+	di->dataoffsetlow = 0;
-+	if (ugly_hack != NULL) {
-+		di->ddoffsetlow = ugly_hack->ddoffset;
-+		di->dataoffsetlow = ugly_hack->dataoffset;
-+		di->ddoffsethigh = 0;
-+		di->dataoffsethigh = 0;
-+	} else if (sbh != NULL) {	
-+		if (sbh->bustype == PCI_BUS) {  /* for pci bus, add offset */
-+			if ((sbh->buscoretype == SB_PCIE) && di->dma64){
-+				di->ddoffsetlow = 0;
-+				di->ddoffsethigh = SB_PCIE_DMA_H32;
++			in = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_IN, sizeof (uint32));
++			out = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32));
++			outen = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof (uint32));
++
++			/*
++			 * Avoid glitching the clock if GPRS is already using it.
++			 * We can't actually read the state of the PLLPD so we infer it
++			 * by the value of XTAL_PU which *is* readable via gpioin.
++			 */
++			if (on && (in & PCI_CFG_GPIO_XTAL))
++				return (0);
++
++			if (what & XTAL)
++				outen |= PCI_CFG_GPIO_XTAL;
++			if (what & PLL)
++				outen |= PCI_CFG_GPIO_PLL;
++
++			if (on) {
++				/* turn primary xtal on */
++				if (what & XTAL) {
++					out |= PCI_CFG_GPIO_XTAL;
++					if (what & PLL)
++						out |= PCI_CFG_GPIO_PLL;
++					OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32), out);
++					OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof (uint32), outen);
++					OSL_DELAY(XTAL_ON_DELAY);
++				}
++
++				/* turn pll on */
++				if (what & PLL) {
++					out &= ~PCI_CFG_GPIO_PLL;
++					OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32), out);
++					OSL_DELAY(2000);
++				}
 +			} else {
-+				di->ddoffsetlow = SB_PCI_DMA;
-+				di->ddoffsethigh = 0;
++				if (what & XTAL)
++					out &= ~PCI_CFG_GPIO_XTAL;
++				if (what & PLL)
++					out |= PCI_CFG_GPIO_PLL;
++				OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32), out);
++				OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof (uint32), outen);
 +			}
-+			di->dataoffsetlow =  di->ddoffsetlow;
-+			di->dataoffsethigh =  di->ddoffsethigh;
-+		} 
-+#if defined(__mips__) && defined(IL_BIGENDIAN)
-+		/* use sdram swapped region for data buffers but not dma descriptors */
-+		di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
-+#endif
++
++		default:
++			return (-1);
 +	}
 +
-+	di->addrext = ((ugly_hack == NULL) ? dma_isaddrext(di) : 0);
++	return (0);
++}
 +
-+	DMA_TRACE(("%s: dma_attach: osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d rxoffset %d ddoffset 0x%x dataoffset 0x%x\n", 
-+		   name, osh, ntxd, nrxd, rxbufsize, nrxpost, rxoffset, di->ddoffsetlow, di->dataoffsetlow));
++int sb_pwrctl_xtal(sb_t *sbh, uint what, bool on)
++{
++	return sb_clkctl_xtal(sbh, what, on);
++}
 +
-+	/* allocate tx packet pointer vector */
-+	if (ntxd) {
-+		size = ntxd * sizeof (void*);
-+		if ((di->txp = MALLOC(osh, size)) == NULL) {
-+			DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
-+			goto fail;
-+		}
-+		bzero((char*)di->txp, size);
-+	}
++/* set dynamic clk control mode (forceslow, forcefast, dynamic) */
++/*   returns true if ignore pll off is set and false if it is not */
++bool
++sb_clkctl_clk(sb_t *sbh, uint mode)
++{
++	sb_info_t *si;
++	uint origidx;
++	chipcregs_t *cc;
++	uint32 scc;
++	bool forcefastclk=FALSE;
++	uint intr_val = 0;
 +
-+	/* allocate rx packet pointer vector */
-+	if (nrxd) {
-+		size = nrxd * sizeof (void*);
-+		if ((di->rxp = MALLOC(osh, size)) == NULL) {
-+			DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
-+			goto fail;
-+		}
-+		bzero((char*)di->rxp, size);
-+	} 
++	si = SB_INFO(sbh);
 +
-+	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
-+	if (ntxd) {
-+		if (!dma_alloc(di, DMA_TX))
-+			goto fail;
-+	}
++	/* chipcommon cores prior to rev6 don't support dynamic clock control */
++	if (si->sb.ccrev < 6)
++		return (FALSE);
 +
-+	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
-+	if (nrxd) {
-+		if (!dma_alloc(di, DMA_RX))
-+			goto fail;
-+	}
++	/* chipcommon cores rev10 are a whole new ball game */
++	if (si->sb.ccrev >= 10)
++		return (FALSE);
 +
-+	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ) && !di->addrext) {
-+		DMA_ERROR(("%s: dma_attach: txdpa 0x%lx: addrext not supported\n", di->name, di->txdpa));
-+		goto fail;
-+	}
-+	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ) && !di->addrext) {
-+		DMA_ERROR(("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n", di->name, di->rxdpa));
-+		goto fail;
-+	}
++	INTR_OFF(si, intr_val);
 +
-+	return ((void*)di);
++	origidx = si->curidx;
 +
-+fail:
-+	dma_detach((void*)di);
-+	return (NULL);
-+}
++	cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0);
++	ASSERT(cc != NULL);
 +
-+static bool
-+dma_alloc(dma_info_t *di, uint direction)
-+{
-+	if (DMA64_ENAB(di)) {
-+		return dma64_alloc(di, direction);
-+	} else {
-+		return dma32_alloc(di, direction);
-+	}
-+}
++	if (!(R_REG(&cc->capabilities) & CAP_PWR_CTL))
++		goto done;
 +
-+/* may be called with core in reset */
-+void
-+dma_detach(dma_info_t *di)
-+{
-+	if (di == NULL)
-+		return;
++	switch (mode) {
++	case CLK_FAST:	/* force fast (pll) clock */
++		/* don't forget to force xtal back on before we clear SCC_DYN_XTAL.. */
++		sb_clkctl_xtal(&si->sb, XTAL, ON);
 +
-+	DMA_TRACE(("%s: dma_detach\n", di->name));
++		SET_REG(&cc->slow_clk_ctl, (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
++		break;
 +
-+	/* shouldn't be here if descriptors are unreclaimed */
-+	ASSERT(di->txin == di->txout);
-+	ASSERT(di->rxin == di->rxout);
++	case CLK_DYNAMIC:	/* enable dynamic clock control */
++		scc = R_REG(&cc->slow_clk_ctl);
++		scc &= ~(SCC_FS | SCC_IP | SCC_XC);
++		if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
++			scc |= SCC_XC;
++		W_REG(&cc->slow_clk_ctl, scc);
 +
-+	/* free dma descriptor rings */
-+	if (di->txd32)
-+		DMA_FREE_CONSISTENT(di->osh, ((int8*)di->txd32 - di->txdalign), di->txdalloc, (di->txdpa - di->txdalign));
-+	if (di->rxd32)
-+		DMA_FREE_CONSISTENT(di->osh, ((int8*)di->rxd32 - di->rxdalign), di->rxdalloc, (di->rxdpa - di->rxdalign));
++		/* for dynamic control, we have to release our xtal_pu "force on" */
++		if (scc & SCC_XC)
++			sb_clkctl_xtal(&si->sb, XTAL, OFF);
++		break;
 +
-+	/* free packet pointer vectors */
-+	if (di->txp)
-+		MFREE(di->osh, (void*)di->txp, (di->ntxd * sizeof (void*)));
-+	if (di->rxp)
-+		MFREE(di->osh, (void*)di->rxp, (di->nrxd * sizeof (void*)));
++	default:
++		ASSERT(0);
++	}
 +
-+	/* free our private info structure */
-+	MFREE(di->osh, (void*)di, sizeof (dma_info_t));
++	/* Is the h/w forcing the use of the fast clk */
++	forcefastclk = (bool)((R_REG(&cc->slow_clk_ctl) & SCC_IP) == SCC_IP);
++
++done:
++	sb_setcoreidx(sbh, origidx);
++	INTR_RESTORE(si, intr_val);
++	return (forcefastclk);
 +}
 +
-+/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
-+static bool
-+dma_isaddrext(dma_info_t *di)
++bool sb_pwrctl_clk(sb_t *sbh, uint mode)
 +{
-+	uint32 w;
-+
-+	if (DMA64_ENAB(di)) {
-+		OR_REG(&di->d64txregs->control, D64_XC_AE);
-+		w = R_REG(&di->d32txregs->control);
-+		AND_REG(&di->d32txregs->control, ~D64_XC_AE);
-+		return ((w & XC_AE) == D64_XC_AE);
-+	} else {
-+		OR_REG(&di->d32txregs->control, XC_AE);
-+		w = R_REG(&di->d32txregs->control);
-+		AND_REG(&di->d32txregs->control, ~XC_AE);
-+		return ((w & XC_AE) == XC_AE);
-+	}
++	return sb_clkctl_clk(sbh, mode);
 +}
-+
++/* register driver interrupt disabling and restoring callback functions */
 +void
-+dma_txreset(dma_info_t *di)
++sb_register_intr_callback(sb_t *sbh, void *intrsoff_fn, void *intrsrestore_fn, void *intrsenabled_fn, void *intr_arg)
 +{
-+	DMA_TRACE(("%s: dma_txreset\n", di->name));
++	sb_info_t *si;
 +
-+	if (DMA64_ENAB(di))
-+		dma64_txreset(di);
-+	else
-+		dma32_txreset(di);
++	si = SB_INFO(sbh);
++	si->intr_arg = intr_arg;
++	si->intrsoff_fn = (sb_intrsoff_t)intrsoff_fn;
++	si->intrsrestore_fn = (sb_intrsrestore_t)intrsrestore_fn;
++	si->intrsenabled_fn = (sb_intrsenabled_t)intrsenabled_fn;
++	/* save the current core id.  when this function is called, the current core
++	 * must be the core which provides the driver functions (il, et, wl, etc.)
++	 */
++	si->dev_coreid = si->coreid[si->curidx];
 +}
 +
++
 +void
-+dma_rxreset(dma_info_t *di)
++sb_corepciid(sb_t *sbh, uint16 *pcivendor, uint16 *pcidevice, 
++	uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif)
 +{
-+	DMA_TRACE(("%s: dma_rxreset\n", di->name));
++	uint vendor, core, unit;
++	uint chip, chippkg;
++	char varname[8];
++	uint8 class, subclass, progif;
++	
++	vendor = sb_corevendor(sbh);
++	core = sb_coreid(sbh);
++	unit = sb_coreunit(sbh);
 +
-+	if (DMA64_ENAB(di))
-+		dma64_rxreset(di);
-+	else
-+		dma32_rxreset(di);
-+}
++	chip = BCMINIT(sb_chip)(sbh);
++	chippkg = BCMINIT(sb_chippkg)(sbh);
 +
-+/* initialize descriptor table base address */
-+static void
-+dma_ddtable_init(dma_info_t *di, uint direction, ulong pa)
-+{
-+	if (DMA64_ENAB(di)) {
-+		if (direction == DMA_TX) {
-+			W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
-+			W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
-+		} else {
-+			W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
-+			W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
-+		}
-+	} else {
-+		uint32 offset = di->ddoffsetlow;
-+		if ((offset != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
-+			if (direction == DMA_TX)	
-+				W_REG(&di->d32txregs->addr, (pa + offset));
-+			else
-+				W_REG(&di->d32rxregs->addr, (pa + offset));
-+		} else {        
-+			/* dma32 address extension */
-+			uint32 ae;
-+			ASSERT(di->addrext);
-+			ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
++	progif = 0;
 +	
-+			if (direction == DMA_TX) {
-+				W_REG(&di->d32txregs->addr, ((pa & ~PCI32ADDR_HIGH) + offset));
-+				SET_REG(&di->d32txregs->control, XC_AE, (ae << XC_AE_SHIFT));
-+			} else {
-+				W_REG(&di->d32rxregs->addr, ((pa & ~PCI32ADDR_HIGH) + offset));
-+				SET_REG(&di->d32rxregs->control, RC_AE, (ae << RC_AE_SHIFT));
++	/* Known vendor translations */
++	switch (vendor) {
++	case SB_VEND_BCM:
++		vendor = VENDOR_BROADCOM;
++		break;
++	}
++
++	/* Determine class based on known core codes */
++	switch (core) {
++	case SB_ILINE20:
++		class = PCI_CLASS_NET;
++		subclass = PCI_NET_ETHER;
++		core = BCM47XX_ILINE_ID;
++		break;
++	case SB_ENET:
++		class = PCI_CLASS_NET;
++		subclass = PCI_NET_ETHER;
++		core = BCM47XX_ENET_ID;
++		break;
++	case SB_SDRAM:
++	case SB_MEMC:
++		class = PCI_CLASS_MEMORY;
++		subclass = PCI_MEMORY_RAM;
++		break;
++	case SB_PCI:
++	case SB_PCIE:
++		class = PCI_CLASS_BRIDGE;
++		subclass = PCI_BRIDGE_PCI;
++		break;
++	case SB_MIPS:
++	case SB_MIPS33:
++		class = PCI_CLASS_CPU;
++		subclass = PCI_CPU_MIPS;
++		break;
++	case SB_CODEC:
++		class = PCI_CLASS_COMM;
++		subclass = PCI_COMM_MODEM;
++		core = BCM47XX_V90_ID;
++		break;
++	case SB_USB:
++		class = PCI_CLASS_SERIAL;
++		subclass = PCI_SERIAL_USB;
++		progif = 0x10; /* OHCI */
++		core = BCM47XX_USB_ID;
++		break;
++	case SB_USB11H:
++		class = PCI_CLASS_SERIAL;
++		subclass = PCI_SERIAL_USB;
++		progif = 0x10; /* OHCI */
++		core = BCM47XX_USBH_ID;
++		break;
++	case SB_USB11D:
++		class = PCI_CLASS_SERIAL;
++		subclass = PCI_SERIAL_USB;
++		core = BCM47XX_USBD_ID;
++		break;
++	case SB_IPSEC:
++		class = PCI_CLASS_CRYPT;
++		subclass = PCI_CRYPT_NETWORK;
++		core = BCM47XX_IPSEC_ID;
++		break;
++	case SB_ROBO:
++		class = PCI_CLASS_NET;
++		subclass = PCI_NET_OTHER;
++		core = BCM47XX_ROBO_ID;
++		break;
++	case SB_EXTIF:
++	case SB_CC:
++		class = PCI_CLASS_MEMORY;
++		subclass = PCI_MEMORY_FLASH;
++		break;
++	case SB_D11:
++		class = PCI_CLASS_NET;
++		subclass = PCI_NET_OTHER;
++		/* Let an nvram variable override this */
++		sprintf(varname, "wl%did", unit);
++		if ((core = getintvar(NULL, varname)) == 0) {
++			if (chip == BCM4712_DEVICE_ID) {
++				if (chippkg == BCM4712SMALL_PKG_ID)
++					core = BCM4306_D11G_ID;
++				else
++					core = BCM4306_D11DUAL_ID;
 +			}
 +		}
++		break;
++
++	default:
++		class = subclass = progif = 0xff;
++		break;
 +	}
++
++	*pcivendor = (uint16)vendor;
++	*pcidevice = (uint16)core;
++	*pciclass = class;
++	*pcisubclass = subclass;
++	*pciprogif = progif;
 +}
 +
-+/* init the tx or rx descriptor */
-+static INLINE void
-+dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, ulong pa, uint outidx, uint32 *ctrl)
-+{
-+	uint offset = di->dataoffsetlow;
 +
-+	if ((offset != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
-+		W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + offset));
-+		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*ctrl));
-+	} else {        
-+		/* address extension */
-+		uint32 ae;
-+		ASSERT(di->addrext);
-+		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
 +
-+		*ctrl |= (ae << CTRL_AE_SHIFT);
-+		W_SM(&ddring[outidx].addr, BUS_SWAP32((pa & ~PCI32ADDR_HIGH) + offset));
-+		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*ctrl));
-+	}
-+}
 +
-+/* init the tx or rx descriptor */
-+static INLINE void
-+dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, ulong pa, uint outidx, uint32 *flags, uint32 bufcount)
++/* use the mdio interface to write to mdio slaves */
++static int 
++sb_pcie_mdiowrite(sb_info_t *si,  uint physmedia, uint regaddr, uint val)
 +{
-+	uint32 bufaddr_low = pa + di->dataoffsetlow;
-+	uint32 bufaddr_high = 0 + di->dataoffsethigh;
++	uint mdiodata;
++	uint i = 0;
++	sbpcieregs_t *pcieregs;
 +
-+	uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
++	pcieregs = (sbpcieregs_t*) sb_setcoreidx(&si->sb, si->sb.buscoreidx);
++	ASSERT (pcieregs);
 +
-+	W_SM(&ddring[outidx].addrlow, BUS_SWAP32(bufaddr_low));
-+	W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(bufaddr_high));
-+	W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
-+	W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
-+}
++	/* enable mdio access to SERDES */		
++	W_REG((&pcieregs->mdiocontrol), MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
 +
-+void
-+dma_txinit(dma_info_t *di)
-+{
-+	DMA_TRACE(("%s: dma_txinit\n", di->name));
++	mdiodata = MDIODATA_START | MDIODATA_WRITE | 
++		(physmedia << MDIODATA_DEVADDR_SHF) |
++		(regaddr << MDIODATA_REGADDR_SHF) | MDIODATA_TA | val;
 +
-+	di->txin = di->txout = 0;
-+	di->txavail = di->ntxd - 1;
++	W_REG((&pcieregs->mdiodata), mdiodata);
 +
-+	/* clear tx descriptor ring */
-+	if (DMA64_ENAB(di)) {
-+		BZERO_SM((void*)di->txd64, (di->ntxd * sizeof (dma64dd_t)));
-+		W_REG(&di->d64txregs->control, XC_XE);
-+		dma_ddtable_init(di, DMA_TX, di->txdpa);
-+	} else {
-+		BZERO_SM((void*)di->txd32, (di->ntxd * sizeof (dma32dd_t)));
-+		W_REG(&di->d32txregs->control, XC_XE);
-+		dma_ddtable_init(di, DMA_TX, di->txdpa);
-+	}
-+}
++	PR28829_DELAY();
 +
-+bool
-+dma_txenabled(dma_info_t *di)
-+{
-+	uint32 xc;
-+	
-+	/* If the chip is dead, it is not enabled :-) */
-+	if (DMA64_ENAB(di)) {
-+		xc = R_REG(&di->d64txregs->control);
-+		return ((xc != 0xffffffff) && (xc & D64_XC_XE));
-+	} else {
-+		xc = R_REG(&di->d32txregs->control);
-+		return ((xc != 0xffffffff) && (xc & XC_XE));
++	/* retry till the transaction is complete */
++	while ( i < 10 ) {
++		if (R_REG(&(pcieregs->mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
++			/* Disable mdio access to SERDES */		
++			W_REG((&pcieregs->mdiocontrol), 0);
++			return 0;
++		}
++		OSL_DELAY(1000);
++		i++;
 +	}
-+}
-+
-+void
-+dma_txsuspend(dma_info_t *di)
-+{
-+	DMA_TRACE(("%s: dma_txsuspend\n", di->name));
-+	if (DMA64_ENAB(di))
-+		OR_REG(&di->d64txregs->control, D64_XC_SE);
-+	else
-+		OR_REG(&di->d32txregs->control, XC_SE);
-+}
 +
-+void
-+dma_txresume(dma_info_t *di)
-+{
-+	DMA_TRACE(("%s: dma_txresume\n", di->name));
-+	if (DMA64_ENAB(di))
-+		AND_REG(&di->d64txregs->control, ~D64_XC_SE);
-+	else
-+		AND_REG(&di->d32txregs->control, ~XC_SE);
-+}
++	SB_ERROR(("sb_pcie_mdiowrite: timed out\n"));
++	/* Disable mdio access to SERDES */		
++	W_REG((&pcieregs->mdiocontrol), 0);
++	ASSERT(0);
++	return 1; 
 +
-+bool
-+dma_txsuspendedidle(dma_info_t *di)
-+{
-+	if (DMA64_ENAB(di))
-+		return dma64_txsuspendedidle(di);
-+	else
-+		return dma32_txsuspendedidle(di);
 +}
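/*
 * Illustrative sketch (standalone C, not part of the patch): the MDIO write
 * above is the usual "kick off a transaction, then poll a DONE bit a bounded
 * number of times" pattern.  The register read here is a stand-in function.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_DONE_BIT 0x1u

/* stand-in for a hardware status read; a driver would read a device register */
static uint32_t ex_read_status(int attempt)
{
	return (attempt >= 3) ? EX_DONE_BIT : 0;	/* pretend it completes on try 3 */
}

static int ex_wait_done(int max_tries)
{
	for (int i = 0; i < max_tries; i++) {
		if (ex_read_status(i) & EX_DONE_BIT)
			return 0;			/* transaction completed */
		/* a real driver would delay here between polls */
	}
	return -1;					/* timed out */
}

int main(void)
{
	printf("wait result: %d\n", ex_wait_done(10));
	return 0;
}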
 +
-+bool
-+dma_txsuspended(dma_info_t *di)
++/* indirect way to read pcie config regs*/
++uint 
++sb_pcie_readreg(void *sb, void* arg1, uint offset)
 +{
-+	if (DMA64_ENAB(di))
-+		return ((R_REG(&di->d64txregs->control) & D64_XC_SE) == D64_XC_SE);
-+	else
-+		return ((R_REG(&di->d32txregs->control) & XC_SE) == XC_SE);
-+}
++	sb_info_t *si;
++	sb_t   *sbh;
++	uint retval = 0xFFFFFFFF;
++	sbpcieregs_t *pcieregs;	
++	uint addrtype;
 +
-+bool
-+dma_txstopped(dma_info_t *di)
-+{
-+	if (DMA64_ENAB(di))
-+		return ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_STOPPED);
-+	else
-+		return ((R_REG(&di->d32txregs->status) & XS_XS_MASK) == XS_XS_STOPPED);
-+}
++	sbh = (sb_t *)sb;
++	si = SB_INFO(sbh);
++	ASSERT (PCIE(si)); 
 +
-+bool
-+dma_rxstopped(dma_info_t *di)
-+{
-+	if (DMA64_ENAB(di))
-+		return ((R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK) == D64_RS0_RS_STOPPED);
-+	else
-+		return ((R_REG(&di->d32rxregs->status) & RS_RS_MASK) == RS_RS_STOPPED);
-+}
++	pcieregs = (sbpcieregs_t *)sb_setcore(sbh, SB_PCIE, 0);
++	ASSERT (pcieregs);
 +
-+void
-+dma_fifoloopbackenable(dma_info_t *di)
-+{
-+	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
-+	if (DMA64_ENAB(di))
-+		OR_REG(&di->d64txregs->control, D64_XC_LE);
-+	else
-+		OR_REG(&di->d32txregs->control, XC_LE);
++	addrtype = (uint)((uintptr)arg1);
++	switch(addrtype) {
++		case PCIE_CONFIGREGS:
++			W_REG((&pcieregs->configaddr),offset);
++			retval = R_REG(&(pcieregs->configdata));
++			break;
++		case PCIE_PCIEREGS:
++			W_REG(&(pcieregs->pcieaddr),offset);
++			retval = R_REG(&(pcieregs->pciedata));
++			break;
++		default:
++			ASSERT(0); 
++			break;
++	}
++	return retval;
 +}
 +
-+void
-+dma_rxinit(dma_info_t *di)
++/* indirect way to write pcie config/mdio/pciecore regs*/
++uint 
++sb_pcie_writereg(sb_t *sbh, void *arg1,  uint offset, uint val)
 +{
-+	DMA_TRACE(("%s: dma_rxinit\n", di->name));
-+
-+	di->rxin = di->rxout = 0;
++	sb_info_t *si;
++	sbpcieregs_t *pcieregs;	
++	uint addrtype;
 +
-+	/* clear rx descriptor ring */
-+	if (DMA64_ENAB(di)) {
-+                BZERO_SM((void*)di->rxd64, (di->nrxd * sizeof (dma64dd_t)));
-+		dma_rxenable(di);
-+		dma_ddtable_init(di, DMA_RX, di->rxdpa);
-+	} else {
-+		BZERO_SM((void*)di->rxd32, (di->nrxd * sizeof (dma32dd_t)));
-+		dma_rxenable(di);
-+		dma_ddtable_init(di, DMA_RX, di->rxdpa);
-+	}
-+}
++	si = SB_INFO(sbh);
++	ASSERT (PCIE(si)); 
 +
-+void
-+dma_rxenable(dma_info_t *di)
-+{
-+	DMA_TRACE(("%s: dma_rxenable\n", di->name));
-+	if (DMA64_ENAB(di))
-+		W_REG(&di->d64rxregs->control, ((di->rxoffset << D64_RC_RO_SHIFT) | D64_RC_RE));
-+	else
-+		W_REG(&di->d32rxregs->control, ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
-+}
++	pcieregs = (sbpcieregs_t *)sb_setcore(sbh, SB_PCIE, 0);
++	ASSERT (pcieregs);
 +
-+bool
-+dma_rxenabled(dma_info_t *di)
-+{
-+	uint32 rc;
++	addrtype = (uint)((uintptr)arg1);
 +
-+	if (DMA64_ENAB(di)) { 
-+		rc = R_REG(&di->d64rxregs->control);
-+		return ((rc != 0xffffffff) && (rc & D64_RC_RE));
-+	} else {
-+		rc = R_REG(&di->d32rxregs->control);
-+		return ((rc != 0xffffffff) && (rc & RC_RE));
++	switch(addrtype) {
++		case PCIE_CONFIGREGS:
++			W_REG((&pcieregs->configaddr),offset);
++			W_REG((&pcieregs->configdata),val);
++			break;
++		case PCIE_PCIEREGS:
++			W_REG((&pcieregs->pcieaddr),offset);
++			W_REG((&pcieregs->pciedata),val);
++			break;
++		default:
++			ASSERT(0); 
++			break;
 +	}
++	return 0;
 +}
 +
 +
-+/* !! tx entry routine */
++/* Build device path. Support SB, PCI, and JTAG for now. */
 +int
-+dma_txfast(dma_info_t *di, void *p0, uint32 coreflags)
++sb_devpath(sb_t *sbh, char *path, int size)
 +{
-+	if (DMA64_ENAB(di)) { 
-+		return dma64_txfast(di, p0, coreflags);
-+	} else {
-+		return dma32_txfast(di, p0, coreflags);
++	ASSERT(path);
++	ASSERT(size >= SB_DEVPATH_BUFSZ);
++	
++	switch (BUSTYPE((SB_INFO(sbh))->sb.bustype)) {
++	case SB_BUS:
++	case JTAG_BUS:
++		sprintf(path, "sb/%u/", sb_coreidx(sbh));
++		break;
++	case PCI_BUS:
++		ASSERT((SB_INFO(sbh))->osh);
++		sprintf(path, "pci/%u/%u/", OSL_PCI_BUS((SB_INFO(sbh))->osh),
++			OSL_PCI_SLOT((SB_INFO(sbh))->osh));
++		break;
++	case PCMCIA_BUS:
++		SB_ERROR(("sb_devpath: OSL_PCMCIA_BUS() not implemented, bus 1 assumed\n"));
++		SB_ERROR(("sb_devpath: OSL_PCMCIA_SLOT() not implemented, slot 1 assumed\n"));
++		sprintf(path, "pc/%u/%u/", 1, 1);
++		break;
++	case SDIO_BUS:
++		SB_ERROR(("sb_devpath: device 0 assumed\n"));
++		sprintf(path, "sd/%u/", sb_coreidx(sbh));
++		break;
++	default:
++		ASSERT(0);
++		break;
 +	}
++
++	return 0;
 +}
 +
-+/* !! rx entry routine, returns a pointer to the next frame received, or NULL if there are no more */
-+void*
-+dma_rx(dma_info_t *di)
++/* Fix chip's configuration. The current core may be changed upon return */
++static int
++sb_pci_fixcfg(sb_info_t *si)
 +{
-+	void *p;
-+	uint len;
-+	int skiplen = 0;
-+
-+	while ((p = dma_getnextrxp(di, FALSE))) {
-+		/* skip giant packets which span multiple rx descriptors */
-+		if (skiplen > 0) {
-+			skiplen -= di->rxbufsize;
-+			if (skiplen < 0)
-+				skiplen = 0;
-+			PKTFREE(di->osh, p, FALSE);
-+			continue;
-+		}
++	uint origidx, pciidx;
++	sbpciregs_t *pciregs;
++	sbpcieregs_t *pcieregs;
++	uint16 val16, *reg16;
++	char name[SB_DEVPATH_BUFSZ+16], *value;
++	char devpath[SB_DEVPATH_BUFSZ];
 +
-+		len = ltoh16(*(uint16*)(PKTDATA(di->osh, p)));
-+		DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
++	ASSERT(BUSTYPE(si->sb.bustype) == PCI_BUS);
 +
-+		/* bad frame length check */
-+		if (len > (di->rxbufsize - di->rxoffset)) {
-+			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
-+			if (len > 0)
-+				skiplen = len - (di->rxbufsize - di->rxoffset);
-+			PKTFREE(di->osh, p, FALSE);
-+			di->hnddma.rxgiants++;
-+			continue;
-+		}
++	/* Fix PCI(e) SROM shadow area */
++	/* save the current index */
++	origidx = sb_coreidx(&si->sb);
 +
-+		/* set actual length */
-+		PKTSETLEN(di->osh, p, (di->rxoffset + len));
++	/* check 'pi' is correct and fix it if not */
++	if (si->sb.buscoretype == SB_PCIE) {
++		pcieregs = (sbpcieregs_t *)sb_setcore(&si->sb, SB_PCIE, 0);
++		ASSERT(pcieregs);
++		reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
++	}
++	else if (si->sb.buscoretype == SB_PCI) {
++		pciregs = (sbpciregs_t *)sb_setcore(&si->sb, SB_PCI, 0);
++		ASSERT(pciregs);
++		reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
++	}
++	else {
++		ASSERT(0);
++		return -1;
++	}
++	pciidx = sb_coreidx(&si->sb);
++	val16 = R_REG(reg16);
++	if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (uint16)pciidx) {
++		val16 = (uint16)(pciidx << SRSH_PI_SHIFT) | (val16 & ~SRSH_PI_MASK);
++		W_REG(reg16, val16);
++	}
 +
-+		break;
++	/* restore the original index */
++	sb_setcoreidx(&si->sb, origidx);
++	
++	/* Fix bar0window */
++	/* !do it last, it changes the current core! */
++	if (sb_devpath(&si->sb, devpath, sizeof(devpath)))
++		return -1;
++	sprintf(name, "%sb0w", devpath);
++	if ((value = getvar(NULL, name))) {
++		OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32),
++			bcm_strtoul(value, NULL, 16));
++		/* update curidx since the current core is changed */
++		si->curidx = _sb_coreidx(si);
++		if (si->curidx == BADIDX) {
++			SB_ERROR(("sb_pci_fixcfg: bad core index\n"));
++			return -1;
++		}
 +	}
 +
-+	return (p);
++	return 0;
 +}
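
A minimal standalone sketch of the string handling in sb_devpath()/sb_pci_fixcfg() above: the PCI_BUS case formats "pci/<bus>/<slot>/", "b0w" is appended to form the nvram key, and the stored value is parsed as hex before being written back to PCI_BAR0_WIN. The bus/slot numbers and the sample hex value below are made up for illustration only:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		char devpath[32], name[48];
		unsigned long b0w;

		/* sb_devpath(), PCI_BUS case: "pci/<bus>/<slot>/" */
		sprintf(devpath, "pci/%u/%u/", 1, 2);

		/* sb_pci_fixcfg(): nvram key holding the saved bar0window */
		sprintf(name, "%sb0w", devpath);	/* -> "pci/1/2/b0w" */

		/* the value is parsed base 16, cf. bcm_strtoul(value, NULL, 16) */
		b0w = strtoul("18001000", NULL, 16);

		printf("%s = 0x%lx\n", name, b0w);
		return 0;
	}
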
 +
-+/* post receive buffers */
-+void
-+dma_rxfill(dma_info_t *di)
-+{
-+	void *p;
-+	uint rxin, rxout;
-+	uint32 ctrl;
-+	uint n;
-+	uint i;
-+	uint32 pa;
-+	uint rxbufsize;
+diff -Naur linux.old/drivers/net/hnd/shared_ksyms.sh linux.dev/drivers/net/hnd/shared_ksyms.sh
+--- linux.old/drivers/net/hnd/shared_ksyms.sh	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/hnd/shared_ksyms.sh	2006-04-06 15:34:15.000000000 +0200
+@@ -0,0 +1,21 @@
++#!/bin/sh
++#
++# Copyright 2004, Broadcom Corporation      
++# All Rights Reserved.      
++#       
++# THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY      
++# KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM      
++# SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS      
++# FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
++#
++# $Id: shared_ksyms.sh,v 1.1 2005/03/16 13:50:00 wbx Exp $
++#
 +
-+	/*
-+	 * Determine how many receive buffers we're lacking
-+	 * from the full complement, allocate, initialize,
-+	 * and post them, then update the chip rx lastdscr.
-+	 */
++cat <<EOF
++#include <linux/config.h>
++#include <linux/module.h>
++EOF
 +
-+	rxin = di->rxin;
-+	rxout = di->rxout;
-+	rxbufsize = di->rxbufsize;
++for file in $* ; do
++    ${NM} $file | sed -ne 's/[0-9A-Fa-f]* [DT] \([^ ]*\)/extern void \1; EXPORT_SYMBOL(\1);/p'
++done
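
For reference, each data/text symbol that ${NM} reports is turned by the sed expression above into one line of C appended to the here-doc header; for a hypothetical exported symbol named sb_devpath the generated file would look roughly like this (the symbol name is only an example):

	#include <linux/config.h>
	#include <linux/module.h>
	/* one line per [DT] symbol found by ${NM} in the listed objects */
	extern void sb_devpath; EXPORT_SYMBOL(sb_devpath);
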
+diff -Naur linux.old/drivers/net/wireless/Config.in linux.dev/drivers/net/wireless/Config.in
+--- linux.old/drivers/net/wireless/Config.in	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/drivers/net/wireless/Config.in	2006-04-06 17:04:48.000000000 +0200
+@@ -13,6 +13,8 @@
+ fi
+ 
+ if [ "$CONFIG_PCI" = "y" ]; then
++   dep_tristate '    Proprietary Broadcom BCM43xx 802.11 Wireless support (old)' CONFIG_WL
++   dep_tristate '    Proprietary Broadcom BCM43xx 802.11 Wireless support (new)' CONFIG_WL2
+    dep_tristate '    Hermes in PLX9052 based PCI adaptor support (Netgear MA301 etc.) (EXPERIMENTAL)' CONFIG_PLX_HERMES $CONFIG_HERMES $CONFIG_EXPERIMENTAL
+    dep_tristate '    Hermes in TMD7160/NCP130 based PCI adaptor support (Pheecom WL-PCI etc.) (EXPERIMENTAL)' CONFIG_TMD_HERMES $CONFIG_HERMES $CONFIG_EXPERIMENTAL
+    dep_tristate '    Prism 2.5 PCI 802.11b adaptor support (EXPERIMENTAL)' CONFIG_PCI_HERMES $CONFIG_HERMES $CONFIG_EXPERIMENTAL
+diff -Naur linux.old/drivers/net/wl/Makefile linux.dev/drivers/net/wl/Makefile
+--- linux.old/drivers/net/wl/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/wl/Makefile	2006-04-06 16:56:38.000000000 +0200
+@@ -0,0 +1,27 @@
++#
++# Makefile for the Broadcom wl driver
++#
++# Copyright 2004, Broadcom Corporation
++# All Rights Reserved.
++# 
++# THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
++# KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
++# SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
++# FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
++#
++# $Id: Makefile,v 1.2 2005/03/29 03:32:18 mbm Exp $
 +
-+	n = di->nrxpost - NRXDACTIVE(rxin, rxout);
++EXTRA_CFLAGS += -I$(TOPDIR)/arch/mips/bcm947xx/include -DBCMDRIVER
 +
-+	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
++O_TARGET	:= wl.o
 +
-+	for (i = 0; i < n; i++) {
-+		if ((p = PKTGET(di->osh, rxbufsize, FALSE)) == NULL) {
-+			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
-+			di->hnddma.rxnobuf++;
-+			break;
-+		}
++obj-y		:= apsta_aeskeywrap.o apsta_aes.o apsta_bcmwpa.o apsta_d11ucode.o
++obj-y		+= apsta_hmac.o apsta_md5.o apsta_passhash.o apsta_prf.o apsta_rc4.o
++obj-y		+= apsta_rijndael-alg-fst.o apsta_sha1.o apsta_tkhash.o apsta_wlc_led.o
++obj-y		+= apsta_wlc_phy.o apsta_wlc_rate.o apsta_wlc_security.o 
++obj-y		+= apsta_wlc_sup.o apsta_wlc_wet.o apsta_wl_linux.o apsta_wlc.o
++obj-y		+= compat.o hnddma.o
 +
-+		/* Do a cached write instead of uncached write since DMA_MAP
-+		 * will flush the cache. */
-+		*(uint32*)(PKTDATA(di->osh, p)) = 0;
++obj-m		:= $(O_TARGET)
 +
-+		pa = (uint32) DMA_MAP(di->osh, PKTDATA(di->osh, p), rxbufsize, DMA_RX, p);
-+		ASSERT(ISALIGNED(pa, 4));
++include $(TOPDIR)/Rules.make
+diff -Naur linux.old/drivers/net/wl/compat.c linux.dev/drivers/net/wl/compat.c
+--- linux.old/drivers/net/wl/compat.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/wl/compat.c	2006-04-06 17:12:19.000000000 +0200
+@@ -0,0 +1,237 @@
++/*
++ * Misc useful OS-independent routines.
++ *
++ * Copyright 2005, Broadcom Corporation      
++ * All Rights Reserved.      
++ *       
++ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY      
++ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM      
++ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS      
++ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
++ * $Id$
++ */
 +
-+		/* save the free packet pointer */
-+		ASSERT(di->rxp[rxout] == NULL);
-+		di->rxp[rxout] = p;
++#include <typedefs.h>
++#ifdef BCMDRIVER
++#include <osl.h>
++#include <sbutils.h>
++#include <bcmnvram.h>
++#else
++#include <stdio.h>
++#include <string.h>
++#endif
++#include "pktq.h"
++#include <bcmutils.h>
++#include <bcmendian.h>
++#include <bcmdevs.h>
 +
-+		if (DMA64_ENAB(di)) {
-+			/* prep the descriptor control value */
-+			if (rxout == (di->nrxd - 1))
-+				ctrl = CTRL_EOT;
++#ifdef BCMDRIVER
++/* copy a pkt buffer chain into a buffer */
++uint
++pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
++{
++	uint n, ret = 0;
 +
-+			dma64_dd_upd(di, di->rxd64, pa, rxout, &ctrl, rxbufsize);
-+		} else {
-+			/* prep the descriptor control value */
-+			ctrl = rxbufsize;
-+			if (rxout == (di->nrxd - 1))
-+				ctrl |= CTRL_EOT;
-+			dma32_dd_upd(di, di->rxd32, pa, rxout, &ctrl);
-+		}
++	if (len < 0)
++		len = 4096;	/* "infinite" */
 +
-+		rxout = NEXTRXD(rxout);
++	/* skip 'offset' bytes */
++	for (; p && offset; p = PKTNEXT(osh, p)) {
++		if (offset < (uint)PKTLEN(osh, p))
++			break;
++		offset -= PKTLEN(osh, p);
 +	}
 +
-+	di->rxout = rxout;
++	if (!p)
++		return 0;
 +
-+	/* update the chip lastdscr pointer */
-+	if (DMA64_ENAB(di)) {
-+		W_REG(&di->d64rxregs->ptr, I2B(rxout, dma64dd_t));
-+	} else {
-+		W_REG(&di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
++	/* copy the data */
++	for (; p && len; p = PKTNEXT(osh, p)) {
++		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
++		bcopy(PKTDATA(osh, p) + offset, buf, n);
++		buf += n;
++		len -= n;
++		ret += n;
++		offset = 0;
 +	}
++
++	return ret;
 +}
 +
-+void
-+dma_txreclaim(dma_info_t *di, bool forceall)
++/* return total length of buffer chain */
++uint
++pkttotlen(osl_t *osh, void *p)
 +{
-+	void *p;
-+
-+	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));
++	uint total;
 +
-+	while ((p = dma_getnexttxp(di, forceall)))
-+		PKTFREE(di->osh, p, TRUE);
++	total = 0;
++	for (; p; p = PKTNEXT(osh, p))
++		total += PKTLEN(osh, p);
++	return (total);
 +}
 +
-+/*
-+ * Reclaim next completed txd (txds if using chained buffers) and
-+ * return associated packet.
-+ * If 'force' is true, reclaim txd(s) and return associated packet
-+ * regardless of the value of the hardware "curr" pointer.
-+ */
-+void*
-+dma_getnexttxp(dma_info_t *di, bool forceall)
++void
++pktq_init(struct pktq *q, uint maxlen, const uint8 prio_map[])
 +{
-+	if (DMA64_ENAB(di)) {
-+		return dma64_getnexttxp(di, forceall);
-+	} else {
-+		return dma32_getnexttxp(di, forceall);
++	q->head = q->tail = NULL;
++	q->maxlen = maxlen;
++	q->len = 0;
++	if (prio_map) {
++		q->priority = TRUE;
++		bcopy(prio_map, q->prio_map, sizeof(q->prio_map));
 +	}
++	else
++		q->priority = FALSE;
 +}
-+	
-+/* like getnexttxp but no reclaim */
-+void*
-+dma_peeknexttxp(dma_info_t *di)
++
++
++/* should always check pktq_full before calling pktenq */
++void
++pktenq(struct pktq *q, void *p, bool lifo)
 +{
-+	uint end, i;
++	void *next, *prev;
 +
-+	if (DMA64_ENAB(di)) {
-+		end = B2I(R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t);
-+	} else {
-+		end = B2I(R_REG(&di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
++	/* allow 10 pkts slack */
++	ASSERT(q->len < (q->maxlen + 10));
++
++	/* Queueing chains not allowed */
++	ASSERT(PKTLINK(p) == NULL);
++
++	/* Queue is empty */
++	if (q->tail == NULL) {
++		ASSERT(q->head == NULL);
++		q->head = q->tail = p;
 +	}
 +
-+	for (i = di->txin; i != end; i = NEXTTXD(i))
-+		if (di->txp[i])
-+			return (di->txp[i]);
++	/* Insert at head or tail */
++	else if (q->priority == FALSE) {
++		/* Insert at head (LIFO) */
++		if (lifo) {
++			PKTSETLINK(p, q->head);
++			q->head = p;
++		}
++		/* Insert at tail (FIFO) */
++		else {
++			ASSERT(PKTLINK(q->tail) == NULL);
++			PKTSETLINK(q->tail, p);
++			PKTSETLINK(p, NULL);
++			q->tail = p;
++		}
++	}
 +
-+	return (NULL);
-+}
++	/* Insert by priority */
++	else {
++		/* legal priorities 0-7 */
++		ASSERT(PKTPRIO(p) <= MAXPRIO);
 +
-+/*
-+ * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
-+ */
-+void
-+dma_txrotate(di_t *di)
-+{
-+	if (DMA64_ENAB(di)) {
-+		dma64_txrotate(di);
-+	} else {
-+		dma32_txrotate(di);
++		ASSERT(q->head);
++		ASSERT(q->tail);
++		/* Shortcut to insertion at tail */
++		if (_pktq_pri(q, PKTPRIO(p)) < _pktq_pri(q, PKTPRIO(q->tail)) ||
++		    (!lifo && _pktq_pri(q, PKTPRIO(p)) <= _pktq_pri(q, PKTPRIO(q->tail)))) {
++			prev = q->tail;
++			next = NULL;
++		}
++		/* Insert at head or in the middle */
++		else {
++			prev = NULL;
++			next = q->head;
++		}
++		/* Walk the queue */
++		for (; next; prev = next, next = PKTLINK(next)) {
++			/* Priority queue invariant */
++			ASSERT(!prev || _pktq_pri(q, PKTPRIO(prev)) >= _pktq_pri(q, PKTPRIO(next)));
++			/* Insert at head of string of packets of same priority (LIFO) */
++			if (lifo) {
++				if (_pktq_pri(q, PKTPRIO(p)) >= _pktq_pri(q, PKTPRIO(next)))
++					break;
++			}
++			/* Insert at tail of string of packets of same priority (FIFO) */
++			else {
++				if (_pktq_pri(q, PKTPRIO(p)) > _pktq_pri(q, PKTPRIO(next)))
++					break;
++			}
++		}
++		/* Insert at tail */
++		if (next == NULL) {
++			ASSERT(PKTLINK(q->tail) == NULL);
++			PKTSETLINK(q->tail, p);
++			PKTSETLINK(p, NULL);
++			q->tail = p;
++		}
++		/* Insert in the middle */
++		else if (prev) {
++			PKTSETLINK(prev, p);
++			PKTSETLINK(p, next);
++		}
++		/* Insert at head */
++		else {
++			PKTSETLINK(p, q->head);
++			q->head = p;
++		}
 +	}
++
++	/* List invariants after insertion */
++	ASSERT(q->head);
++	ASSERT(PKTLINK(q->tail) == NULL);
++
++	q->len++;
 +}
 +
-+void
-+dma_rxreclaim(dma_info_t *di)
++/* dequeue packet at head */
++void*
++pktdeq(struct pktq *q)
 +{
 +	void *p;
 +
-+	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
++	if ((p = q->head)) {
++		ASSERT(q->tail);
++		q->head = PKTLINK(p);
++		PKTSETLINK(p, NULL);
++		q->len--;
++		if (q->head == NULL)
++			q->tail = NULL;
++	}
++	else {
++		ASSERT(q->tail == NULL);
++	}
 +
-+	while ((p = dma_getnextrxp(di, TRUE)))
-+		PKTFREE(di->osh, p, FALSE);
++	return (p);
 +}
 +
-+void *
-+dma_getnextrxp(dma_info_t *di, bool forceall)
++/* dequeue packet at tail */
++void*
++pktdeqtail(struct pktq *q)
 +{
-+	if (DMA64_ENAB(di)) {
-+		return dma64_getnextrxp(di, forceall);
-+	} else {
-+		return dma32_getnextrxp(di, forceall);
++	void *p;
++	void *next, *prev;
++
++	if (q->head == q->tail) {  /* last packet on queue or queue empty */
++		p = q->head;
++		q->head = q->tail = NULL;
++		q->len = 0;
++		return(p);
 +	}
-+}
 +
-+uintptr
-+dma_getvar(dma_info_t *di, char *name)
-+{
-+	if (!strcmp(name, "&txavail"))
-+		return ((uintptr) &di->txavail);
-+	else {
-+		ASSERT(0);
++	/* start walk at head */
++	prev = NULL;
++	next = q->head;
++
++	/* Walk the queue to find prev of q->tail */
++	for (; next; prev = next, next = PKTLINK(next)) {
++		if (next == q->tail)
++			break;
 +	}
-+	return (0);
-+}
 +
-+void
-+dma_txblock(dma_info_t *di)
-+{
-+	di->txavail = 0;
-+}
++	ASSERT(prev);
 +
-+void
-+dma_txunblock(dma_info_t *di)
-+{
-+	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-+}
++	PKTSETLINK(prev, NULL);
++	q->tail = prev;
++	q->len--;
++	p = next;
 +
-+uint
-+dma_txactive(dma_info_t *di)
-+{
-+	return (NTXDACTIVE(di->txin, di->txout));
-+}
-+	
-+void
-+dma_rxpiomode(dma32regs_t *regs)
-+{
-+	W_REG(&regs->control, RC_FM);
++	return (p);
 +}
++#endif
 +
-+void
-+dma_txpioloopback(dma32regs_t *regs)
-+{
-+	OR_REG(&regs->control, XC_LE);
-+}
 +
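
The ordering behaviour of the non-priority path in pktenq()/pktdeq() above (prio_map == NULL) can be shown with a small standalone model; a plain struct node stands in for the packet chain (PKTLINK/PKTSETLINK become the link member) and everything else is simplified, so this is a sketch of the queue discipline, not of the driver structures:

	#include <stdio.h>

	struct node { struct node *link; const char *tag; };
	struct miniq { struct node *head, *tail; int len; };

	/* non-priority pktenq(): lifo pushes at the head, fifo appends at the tail */
	static void mini_enq(struct miniq *q, struct node *p, int lifo)
	{
		p->link = NULL;
		if (q->tail == NULL)
			q->head = q->tail = p;
		else if (lifo) {
			p->link = q->head;
			q->head = p;
		} else {
			q->tail->link = p;
			q->tail = p;
		}
		q->len++;
	}

	/* pktdeq(): always removes from the head */
	static struct node *mini_deq(struct miniq *q)
	{
		struct node *p = q->head;
		if (p) {
			q->head = p->link;
			p->link = NULL;
			if (q->head == NULL)
				q->tail = NULL;
			q->len--;
		}
		return p;
	}

	int main(void)
	{
		struct miniq q = { NULL, NULL, 0 };
		struct node a = { NULL, "a" }, b = { NULL, "b" }, c = { NULL, "c" };
		struct node *p;

		mini_enq(&q, &a, 0);	/* fifo: a */
		mini_enq(&q, &b, 0);	/* fifo: a b */
		mini_enq(&q, &c, 1);	/* lifo: c a b */

		while ((p = mini_deq(&q)))
			printf("%s ", p->tag);	/* prints: c a b */
		printf("\n");
		return 0;
	}
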
+diff -Naur linux.old/drivers/net/wl/hnddma.c linux.dev/drivers/net/wl/hnddma.c
+--- linux.old/drivers/net/wl/hnddma.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/wl/hnddma.c	2006-04-06 16:58:24.000000000 +0200
+@@ -0,0 +1,1453 @@
++/*
++ * Generic Broadcom Home Networking Division (HND) DMA module.
++ * This supports the following chips: BCM42xx, 44xx, 47xx .
++ *
++ * Copyright 2005, Broadcom Corporation
++ * All Rights Reserved.
++ * 
++ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
++ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
++ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
++ *
++ * $Id$
++ */
 +
++#include <typedefs.h>
++#include <osl.h>
++#include <bcmendian.h>
++#include <sbconfig.h>
++#include <bcmutils.h>
++#include <bcmdevs.h>
++#include <sbutils.h>
 +
++struct dma_info;	/* forward declaration */
++#define di_t struct dma_info
 +
-+/*** 32 bits DMA non-inline functions ***/
-+static bool
-+dma32_alloc(dma_info_t *di, uint direction)
-+{
-+	uint size;
-+	uint ddlen;
-+	void *va;
++#include "sbhnddma.h"
++#include "hnddma.h"
 +
-+	ddlen = sizeof (dma32dd_t);
++/* debug/trace */
++#define	DMA_ERROR(args)
++#define	DMA_TRACE(args)
 +
-+	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
++/* default dma message level (if input msg_level pointer is null in dma_attach()) */
++static uint dma_msg_level =
++	0;
 +
-+	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, D32RINGALIGN))
-+		size += D32RINGALIGN;
++#define	MAXNAMEL	8
 +
++/* dma engine software state */
++typedef struct dma_info {
++	hnddma_t	hnddma;		/* exported structure */
++	uint		*msg_level;	/* message level pointer */
++	char		name[MAXNAMEL];	/* callers name for diag msgs */
++	
++	void		*osh;		/* os handle */
++	sb_t		*sbh;		/* sb handle */
++	
++	bool		dma64;		/* dma64 enabled */
++	bool		addrext;	/* this dma engine supports DmaExtendedAddrChanges */
++	
++	dma32regs_t	*d32txregs;	/* 32 bits dma tx engine registers */
++	dma32regs_t	*d32rxregs;	/* 32 bits dma rx engine registers */
++	dma64regs_t	*d64txregs;	/* 64 bits dma tx engine registers */
++	dma64regs_t	*d64rxregs;	/* 64 bits dma rx engine registers */
 +
-+	if (direction == DMA_TX) {
-+		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa)) == NULL) {
-+			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
-+			return FALSE;
-+		}
++	uint32		dma64align;	/* either 8k or 4k depends on number of dd */
++	dma32dd_t	*txd32;		/* pointer to dma32 tx descriptor ring */
++	dma64dd_t	*txd64;		/* pointer to dma64 tx descriptor ring */
++	uint		ntxd;		/* # tx descriptors tunable */	
++	uint		txin;		/* index of next descriptor to reclaim */
++	uint		txout;		/* index of next descriptor to post */
++	uint		txavail;	/* # free tx descriptors */
++	void		**txp;		/* pointer to parallel array of pointers to packets */
++	ulong		txdpa;		/* physical address of descriptor ring */
++	uint		txdalign;	/* #bytes added to alloc'd mem to align txd */
++	uint		txdalloc;	/* #bytes allocated for the ring */
 +
-+		di->txd32 = (dma32dd_t*) ROUNDUP((uintptr)va, D32RINGALIGN);
-+		di->txdalign = (uint)((int8*)di->txd32 - (int8*)va);
-+		di->txdpa += di->txdalign;
-+		di->txdalloc = size;
-+		ASSERT(ISALIGNED((uintptr)di->txd32, D32RINGALIGN));
-+	} else {
-+		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa)) == NULL) {
-+			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
-+			return FALSE;
-+		}
-+		di->rxd32 = (dma32dd_t*) ROUNDUP((uintptr)va, D32RINGALIGN);
-+		di->rxdalign = (uint)((int8*)di->rxd32 - (int8*)va);
-+		di->rxdpa += di->rxdalign;
-+		di->rxdalloc = size;
-+		ASSERT(ISALIGNED((uintptr)di->rxd32, D32RINGALIGN));
-+	}
++	dma32dd_t	*rxd32;		/* pointer to dma32 rx descriptor ring */
++	dma64dd_t	*rxd64;		/* pointer to dma64 rx descriptor ring */
++	uint		nrxd;		/* # rx descriptors tunable */	
++	uint		rxin;		/* index of next descriptor to reclaim */
++	uint		rxout;		/* index of next descriptor to post */
++	void		**rxp;		/* pointer to parallel array of pointers to packets */
++	ulong		rxdpa;		/* physical address of descriptor ring */
++	uint		rxdalign;	/* #bytes added to alloc'd mem to align rxd */
++	uint		rxdalloc;	/* #bytes allocated for the ring */
 +
-+	return TRUE;
-+}
++	/* tunables */
++	uint		rxbufsize;	/* rx buffer size in bytes */
++	uint		nrxpost;	/* # rx buffers to keep posted */
++	uint		rxoffset;	/* rxcontrol offset */
++	uint		ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
++	uint		ddoffsethigh;	/* add to get dma address of descriptor ring, high 32 bits */
++	uint		dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
++	uint		dataoffsethigh;	/* add to get dma address of data buffer, high 32 bits */
++} dma_info_t;
 +
-+static void 
-+dma32_txreset(dma_info_t *di)
-+{
-+	uint32 status;
++#ifdef BCMDMA64
++#define	DMA64_ENAB(di)	((di)->dma64)
++#else
++#define	DMA64_ENAB(di)	(0)
++#endif
 +
-+	/* suspend tx DMA first */
-+	W_REG(&di->d32txregs->control, XC_SE);
-+	SPINWAIT((status = (R_REG(&di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED &&
-+		 status != XS_XS_IDLE &&
-+		 status != XS_XS_STOPPED,
-+		 10000);
++/* descriptor bumping macros */
++#define	XXD(x, n)	((x) & ((n) - 1))
++#define	TXD(x)		XXD((x), di->ntxd)
++#define	RXD(x)		XXD((x), di->nrxd)
++#define	NEXTTXD(i)	TXD(i + 1)
++#define	PREVTXD(i)	TXD(i - 1)
++#define	NEXTRXD(i)	RXD(i + 1)
++#define	NTXDACTIVE(h, t)	TXD(t - h)
++#define	NRXDACTIVE(h, t)	RXD(t - h)
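
A small standalone illustration of the masking arithmetic these macros rely on: the ring sizes must be powers of two (dma_attach() below asserts ISPOWEROF2(ntxd)), so "& (n - 1)" wraps an index, works across integer wrap-around, and one slot is deliberately kept free for flow control. The macros are restated on plain variables here because the originals reach into di->ntxd/di->nrxd:

	#include <stdio.h>

	#define XXD(x, n)	((x) & ((n) - 1))	/* wrap x into [0, n) for power-of-two n */

	int main(void)
	{
		unsigned ntxd = 8;		/* example ring size (power of two) */
		unsigned txin = 6, txout = 1;	/* reclaim index, post index */

		/* NEXTTXD(): advancing past the last slot wraps back to 0 */
		printf("next after 7 -> %u\n", XXD(7 + 1, ntxd));		/* 0 */

		/* NTXDACTIVE(txin, txout): in-flight descriptors, even across the wrap */
		printf("active = %u\n", XXD(txout - txin, ntxd));		/* 3 */

		/* tx flow control as used throughout this file: one slot stays free
		 * so txout == txin always means "empty", never "full" */
		printf("txavail = %u\n", ntxd - XXD(txout - txin, ntxd) - 1);	/* 4 */
		return 0;
	}
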
 +
-+	W_REG(&di->d32txregs->control, 0);
-+	SPINWAIT((status = (R_REG(&di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED,
-+		 10000);
++/* macros to convert between byte offsets and indexes */
++#define	B2I(bytes, type)	((bytes) / sizeof(type))
++#define	I2B(index, type)	((index) * sizeof(type))
 +
-+	if (status != XS_XS_DISABLED) {
-+		DMA_ERROR(("%s: dma_txreset: dma cannot be stopped\n", di->name));
-+	}
++#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
++#define	PCI32ADDR_HIGH_SHIFT	30
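
The "address extension" branches in dma_ddtable_init() and dma32_dd_upd() further down use these two constants: bits 31:30 of a physical address travel in the AE field, while the descriptor or register gets the address with those bits stripped (plus the per-bus offset, omitted here). The same bit arithmetic on an arbitrary sample address, as a standalone sketch:

	#include <stdio.h>

	#define PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
	#define PCI32ADDR_HIGH_SHIFT	30

	int main(void)
	{
		unsigned long pa = 0x87654320;	/* sample buffer/ring physical address */
		unsigned long ae, low;

		/* the two top bits go into the AE field of the control word/register */
		ae  = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;	/* 0x2 */

		/* the descriptor/register receives the address with them cleared */
		low = pa & ~PCI32ADDR_HIGH;				/* 0x07654320 */

		printf("ae=0x%lx low=0x%08lx\n", ae, low);
		return 0;
	}
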
 +
-+	/* wait for the last transaction to complete */
-+	OSL_DELAY(300);
-+}
 +
-+static void 
-+dma32_rxreset(dma_info_t *di)
-+{
-+	uint32 status;
++/* prototypes */
++static bool dma_isaddrext(dma_info_t *di);
++static bool dma_alloc(dma_info_t *di, uint direction);
 +
-+	W_REG(&di->d32rxregs->control, 0);
-+	SPINWAIT((status = (R_REG(&di->d32rxregs->status) & RS_RS_MASK)) != RS_RS_DISABLED,
-+		 10000);
++static bool dma32_alloc(dma_info_t *di, uint direction);
++static void dma32_txreset(dma_info_t *di);
++static void dma32_rxreset(dma_info_t *di);
++static bool dma32_txsuspendedidle(dma_info_t *di);
++static int  dma32_txfast(dma_info_t *di, void *p0, uint32 coreflags);
++static void* dma32_getnexttxp(dma_info_t *di, bool forceall);
++static void* dma32_getnextrxp(dma_info_t *di, bool forceall);
++static void dma32_txrotate(di_t *di);
 +
-+	if (status != RS_RS_DISABLED) {
-+		DMA_ERROR(("%s: dma_rxreset: dma cannot be stopped\n", di->name));
-+	}
-+}
++/* prototype or stubs */
++#ifdef BCMDMA64
++static bool dma64_alloc(dma_info_t *di, uint direction);
++static void dma64_txreset(dma_info_t *di);
++static void dma64_rxreset(dma_info_t *di);
++static bool dma64_txsuspendedidle(dma_info_t *di);
++static int  dma64_txfast(dma_info_t *di, void *p0, uint32 coreflags);
++static void* dma64_getnexttxp(dma_info_t *di, bool forceall);
++static void* dma64_getnextrxp(dma_info_t *di, bool forceall);
++static void dma64_txrotate(di_t *di);
++#else
++static bool dma64_alloc(dma_info_t *di, uint direction) { return TRUE; }
++static void dma64_txreset(dma_info_t *di) {}
++static void dma64_rxreset(dma_info_t *di) {}
++static bool dma64_txsuspendedidle(dma_info_t *di) { return TRUE;}
++static int  dma64_txfast(dma_info_t *di, void *p0, uint32 coreflags) { return 0; }
++static void* dma64_getnexttxp(dma_info_t *di, bool forceall) { return NULL; }
++static void* dma64_getnextrxp(dma_info_t *di, bool forceall) { return NULL; }
++static void dma64_txrotate(di_t *di) { return; }
++#endif
 +
-+static bool
-+dma32_txsuspendedidle(dma_info_t *di)
-+{
-+	if (!(R_REG(&di->d32txregs->control) & XC_SE))
-+		return 0;
++/* old dmaregs struct for compatibility */
++typedef volatile struct {
++	/* transmit channel */
++	uint32  xmtcontrol;         /* enable, et al */
++	uint32  xmtaddr;            /* descriptor ring base address (4K aligned) */
++	uint32  xmtptr;             /* last descriptor posted to chip */
++	uint32  xmtstatus;          /* current active descriptor, et al */
 +	
-+	if ((R_REG(&di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
-+		return 0;
-+
-+	OSL_DELAY(2);
-+	return ((R_REG(&di->d32txregs->status) & XS_XS_MASK) == XS_XS_IDLE);
-+}
++	/* receive channel */
++	uint32  rcvcontrol;         /* enable, et al */
++	uint32  rcvaddr;            /* descriptor ring base address (4K aligned) */
++	uint32  rcvptr;             /* last descriptor posted to chip */
++	uint32  rcvstatus;          /* current active descriptor, et al */
++} dmaregs_t;
 +
-+/*
-+ * supports full 32bit dma engine buffer addressing so
-+ * dma buffers can cross 4 Kbyte page boundaries.
-+ */
-+static int
-+dma32_txfast(dma_info_t *di, void *p0, uint32 coreflags)
++void* 
++dma_attach(void *drv, void *osh, char *name, dmaregs_t *regs, uint ntxd, uint nrxd,
++		uint rxbufsize, uint nrxpost, uint rxoffset, uint ddoffset, uint dataoffset, uint *msg_level)
 +{
-+	void *p, *next;
-+	uchar *data;
-+	uint len;
-+	uint txout;
-+	uint32 ctrl;
-+	uint32 pa;	
-+
-+	DMA_TRACE(("%s: dma_txfast\n", di->name));
++	dma_info_t *di;
++	uint size;
++	dma32regs_t *dmaregstx = regs;
++	dma32regs_t *dmaregsrx = dmaregstx + 1;
++	
++	/* allocate private info structure */
++	if ((di = MALLOC(osh, sizeof (dma_info_t))) == NULL) {
++		return (NULL);
++	}
++	bzero((char*)di, sizeof (dma_info_t));
++	di->msg_level = msg_level ? msg_level : &dma_msg_level;
 +
-+	txout = di->txout;
-+	ctrl = 0;
++	/* check arguments */
++	ASSERT(ISPOWEROF2(ntxd));
++	ASSERT(ISPOWEROF2(nrxd));
++	if (nrxd == 0)
++		ASSERT(dmaregsrx == NULL);
++	if (ntxd == 0)
++		ASSERT(dmaregstx == NULL);
 +
-+	/*
-+	 * Walk the chain of packet buffers
-+	 * allocating and initializing transmit descriptor entries.
-+	 */
-+	for (p = p0; p; p = next) {
-+		data = PKTDATA(di->osh, p);
-+		len = PKTLEN(di->osh, p);
-+		next = PKTNEXT(di->osh, p);
++	ASSERT(ntxd <= D32MAXDD);
++	ASSERT(nrxd <= D32MAXDD);
++	di->d32txregs = (dma32regs_t *)dmaregstx;
++	di->d32rxregs = (dma32regs_t *)dmaregsrx;
 +
-+		/* return nonzero if out of tx descriptors */
-+		if (NEXTTXD(txout) == di->txin)
-+			goto outoftxd;
++	/* make a private copy of our callers name */
++	strncpy(di->name, name, MAXNAMEL);
++	di->name[MAXNAMEL-1] = '\0';
 +
-+		if (len == 0)
-+			continue;
++	di->osh = osh;
++	di->sbh = NULL;
 +
-+		/* get physical address of buffer start */
-+		pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);
++	/* save tunables */
++	di->ntxd = ntxd;
++	di->nrxd = nrxd;
++	di->rxbufsize = rxbufsize;
++	di->nrxpost = nrxpost;
++	di->rxoffset = rxoffset;
 +
-+		/* build the descriptor control value */
-+		ctrl = len & CTRL_BC_MASK;
++	di->ddoffsetlow = ddoffset;
++	di->dataoffsetlow = dataoffset;
++	di->ddoffsethigh = 0;
++	di->dataoffsethigh = 0;
++	
++	di->addrext = 0;
++	
++	DMA_TRACE(("%s: dma_attach: osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d rxoffset %d ddoffset 0x%x dataoffset 0x%x\n", 
++		   name, osh, ntxd, nrxd, rxbufsize, nrxpost, rxoffset, di->ddoffsetlow, di->dataoffsetlow));
 +
-+		ctrl |= coreflags;
-+		
-+		if (p == p0)
-+			ctrl |= CTRL_SOF;
-+		if (next == NULL)
-+			ctrl |= (CTRL_IOC | CTRL_EOF);
-+		if (txout == (di->ntxd - 1))
-+			ctrl |= CTRL_EOT;
++	/* allocate tx packet pointer vector */
++	if (ntxd) {
++		size = ntxd * sizeof (void*);
++		if ((di->txp = MALLOC(osh, size)) == NULL) {
++			DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
++			goto fail;
++		}
++		bzero((char*)di->txp, size);
++	}
 +
-+		if (DMA64_ENAB(di)) {
-+			dma64_dd_upd(di, di->txd64, pa, txout, &ctrl, len);
-+		} else {
-+			dma32_dd_upd(di, di->txd32, pa, txout, &ctrl);
++	/* allocate rx packet pointer vector */
++	if (nrxd) {
++		size = nrxd * sizeof (void*);
++		if ((di->rxp = MALLOC(osh, size)) == NULL) {
++			DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
++			goto fail;
 +		}
++		bzero((char*)di->rxp, size);
++	} 
 +
-+		ASSERT(di->txp[txout] == NULL);
++	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
++	if (ntxd) {
++		if (!dma_alloc(di, DMA_TX))
++			goto fail;
++	}
 +
-+		txout = NEXTTXD(txout);
++	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
++	if (nrxd) {
++		if (!dma_alloc(di, DMA_RX))
++			goto fail;
 +	}
 +
-+	/* if last txd eof not set, fix it */
-+	if (!(ctrl & CTRL_EOF))
-+		W_SM(&di->txd32[PREVTXD(txout)].ctrl, BUS_SWAP32(ctrl | CTRL_IOC | CTRL_EOF));
++	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ) && !di->addrext) {
++		DMA_ERROR(("%s: dma_attach: txdpa 0x%lx: addrext not supported\n", di->name, di->txdpa));
++		goto fail;
++	}
++	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ) && !di->addrext) {
++		DMA_ERROR(("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n", di->name, di->rxdpa));
++		goto fail;
++	}
 +
-+	/* save the packet */
-+	di->txp[PREVTXD(txout)] = p0;
++	return ((void*)di);
 +
-+	/* bump the tx descriptor index */
-+	di->txout = txout;
++fail:
++	dma_detach((void*)di);
++	return (NULL);
++}
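
Purely as orientation for how the entry points of this file fit together; the real call sites are presumably in the prebuilt apsta_*.o objects listed in the wl Makefile above, so the ordering below is inferred from the code here (for instance, dma_detach() asserting that both rings have been reclaimed), and the "wl0" name and elided arguments are just placeholders:

	/*
	 * di = dma_attach(drv, osh, "wl0", regs, ntxd, nrxd, rxbufsize, ...);
	 * dma_txinit(di); dma_rxinit(di); dma_rxfill(di);    bring up rings, post rx buffers
	 * dma_txfast(di, p, coreflags);                      post a tx packet chain
	 * p = dma_rx(di); dma_rxfill(di);                    pull received frames, repost buffers
	 * dma_txreclaim(di, TRUE); dma_rxreclaim(di);        drain everything before teardown
	 * dma_detach(di);
	 */
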
 +
-+	/* kick the chip */
++
++static bool
++dma_alloc(dma_info_t *di, uint direction)
++{
 +	if (DMA64_ENAB(di)) {
-+		W_REG(&di->d64txregs->ptr, I2B(txout, dma64dd_t));
++		return dma64_alloc(di, direction);
 +	} else {
-+		W_REG(&di->d32txregs->ptr, I2B(txout, dma32dd_t));
++		return dma32_alloc(di, direction);
 +	}
-+
-+	/* tx flow control */
-+	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-+
-+	return (0);
-+
-+ outoftxd:
-+	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
-+	PKTFREE(di->osh, p0, TRUE);
-+	di->txavail = 0;
-+	di->hnddma.txnobuf++;
-+	return (-1);
 +}
 +
-+static void*
-+dma32_getnexttxp(dma_info_t *di, bool forceall)
++/* may be called with core in reset */
++void
++dma_detach(dma_info_t *di)
 +{
-+	uint start, end, i;
-+	void *txp;
-+
-+	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));
-+
-+	txp = NULL;
-+
-+	start = di->txin;
-+	if (forceall)
-+		end = di->txout;
-+	else
-+		end = B2I(R_REG(&di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
-+
-+	if ((start == 0) && (end > di->txout))
-+		goto bogus;
-+
-+	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
-+		DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow),
-+			  (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK), DMA_TX, di->txp[i]);
++	if (di == NULL)
++		return;
 +
-+		W_SM(&di->txd32[i].addr, 0xdeadbeef);
-+		txp = di->txp[i];
-+		di->txp[i] = NULL;
-+	}
++	DMA_TRACE(("%s: dma_detach\n", di->name));
 +
-+	di->txin = i;
++	/* shouldn't be here if descriptors are unreclaimed */
++	ASSERT(di->txin == di->txout);
++	ASSERT(di->rxin == di->rxout);
 +
-+	/* tx flow control */
-+	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
++	/* free dma descriptor rings */
++	if (di->txd32)
++		DMA_FREE_CONSISTENT(di->osh, ((int8*)di->txd32 - di->txdalign), di->txdalloc, (di->txdpa - di->txdalign));
++	if (di->rxd32)
++		DMA_FREE_CONSISTENT(di->osh, ((int8*)di->rxd32 - di->rxdalign), di->rxdalloc, (di->rxdpa - di->rxdalign));
 +
-+	return (txp);
++	/* free packet pointer vectors */
++	if (di->txp)
++		MFREE(di->osh, (void*)di->txp, (di->ntxd * sizeof (void*)));
++	if (di->rxp)
++		MFREE(di->osh, (void*)di->rxp, (di->nrxd * sizeof (void*)));
 +
-+bogus:
-+/*
-+	DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
-+		start, end, di->txout, forceall));
-+*/
-+	return (NULL);
++	/* free our private info structure */
++	MFREE(di->osh, (void*)di, sizeof (dma_info_t));
 +}
 +
-+static void *
-+dma32_getnextrxp(dma_info_t *di, bool forceall)
++/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
++static bool
++dma_isaddrext(dma_info_t *di)
 +{
-+	uint i;
-+	void *rxp;
-+
-+	/* if forcing, dma engine must be disabled */
-+	ASSERT(!forceall || !dma_rxenabled(di));
-+
-+	i = di->rxin;
-+
-+	/* return if no packets posted */
-+	if (i == di->rxout)
-+		return (NULL);
++	uint32 w;
 +
-+	/* ignore curr if forceall */
-+	if (!forceall && (i == B2I(R_REG(&di->d32rxregs->status) & RS_CD_MASK, dma32dd_t)))
-+		return (NULL);
++	if (DMA64_ENAB(di)) {
++		OR_REG(&di->d64txregs->control, D64_XC_AE);
++		w = R_REG(&di->d32txregs->control);
++		AND_REG(&di->d32txregs->control, ~D64_XC_AE);
++		return ((w & XC_AE) == D64_XC_AE);
++	} else {
++		OR_REG(&di->d32txregs->control, XC_AE);
++		w = R_REG(&di->d32txregs->control);
++		AND_REG(&di->d32txregs->control, ~XC_AE);
++		return ((w & XC_AE) == XC_AE);
++	}
++}
 +
-+	/* get the packet pointer that corresponds to the rx descriptor */
-+	rxp = di->rxp[i];
-+	ASSERT(rxp);
-+	di->rxp[i] = NULL;
++void
++dma_txreset(dma_info_t *di)
++{
++	DMA_TRACE(("%s: dma_txreset\n", di->name));
 +
-+	/* clear this packet from the descriptor ring */
-+	DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow),
-+		  di->rxbufsize, DMA_RX, rxp);
-+	W_SM(&di->rxd32[i].addr, 0xdeadbeef);
++	if (DMA64_ENAB(di))
++		dma64_txreset(di);
++	else
++		dma32_txreset(di);
++}
 +
-+	di->rxin = NEXTRXD(i);
++void
++dma_rxreset(dma_info_t *di)
++{
++	DMA_TRACE(("%s: dma_rxreset\n", di->name));
 +
-+	return (rxp);
++	if (DMA64_ENAB(di))
++		dma64_rxreset(di);
++	else
++		dma32_rxreset(di);
 +}
 +
++/* initialize descriptor table base address */
 +static void
-+dma32_txrotate(di_t *di)
++dma_ddtable_init(dma_info_t *di, uint direction, ulong pa)
 +{
-+	uint ad;
-+	uint nactive;
-+	uint rot;
-+	uint old, new;
-+	uint32 w;
-+	uint first, last;
-+
-+	ASSERT(dma_txsuspendedidle(di));
-+
-+	nactive = dma_txactive(di);
-+	ad = B2I(((R_REG(&di->d32txregs->status) & XS_AD_MASK) >> XS_AD_SHIFT), dma32dd_t);
-+	rot = TXD(ad - di->txin);
-+
-+	ASSERT(rot < di->ntxd);
-+
-+	/* full-ring case is a lot harder - don't worry about this */
-+	if (rot >= (di->ntxd - nactive)) {
-+		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
-+		return;
++	if (DMA64_ENAB(di)) {
++		if (direction == DMA_TX) {
++			W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
++			W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
++		} else {
++			W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
++			W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
++		}
++	} else {
++		uint32 offset = di->ddoffsetlow;
++		if ((offset != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
++			if (direction == DMA_TX)	
++				W_REG(&di->d32txregs->addr, (pa + offset));
++			else
++				W_REG(&di->d32rxregs->addr, (pa + offset));
++		} else {        
++			/* dma32 address extension */
++			uint32 ae;
++			ASSERT(di->addrext);
++			ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
++	
++			if (direction == DMA_TX) {
++				W_REG(&di->d32txregs->addr, ((pa & ~PCI32ADDR_HIGH) + offset));
++				SET_REG(&di->d32txregs->control, XC_AE, (ae << XC_AE_SHIFT));
++			} else {
++				W_REG(&di->d32rxregs->addr, ((pa & ~PCI32ADDR_HIGH) + offset));
++				SET_REG(&di->d32rxregs->control, RC_AE, (ae << RC_AE_SHIFT));
++			}
++		}
 +	}
++}
 +
-+	first = di->txin;
-+	last = PREVTXD(di->txout);
-+
-+	/* move entries starting at last and moving backwards to first */
-+	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
-+		new = TXD(old + rot);
-+
-+		/*
-+		 * Move the tx dma descriptor.
-+		 * EOT is set only in the last entry in the ring.
-+		 */
-+		w = R_SM(&di->txd32[old].ctrl) & ~CTRL_EOT;
-+		if (new == (di->ntxd - 1))
-+			w |= CTRL_EOT;
-+		W_SM(&di->txd32[new].ctrl, w);
-+		W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));
++/* init the tx or rx descriptor */
++static INLINE void
++dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, ulong pa, uint outidx, uint32 *ctrl)
++{
++	uint offset = di->dataoffsetlow;
 +
-+		/* zap the old tx dma descriptor address field */
-+		W_SM(&di->txd32[old].addr, 0xdeadbeef);
++	if ((offset != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
++		W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + offset));
++		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*ctrl));
++	} else {        
++		/* address extension */
++		uint32 ae;
++		ASSERT(di->addrext);
++		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
 +
-+		/* move the corresponding txp[] entry */
-+		ASSERT(di->txp[new] == NULL);
-+		di->txp[new] = di->txp[old];
-+		di->txp[old] = NULL;
++		*ctrl |= (ae << CTRL_AE_SHIFT);
++		W_SM(&ddring[outidx].addr, BUS_SWAP32((pa & ~PCI32ADDR_HIGH) + offset));
++		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*ctrl));
 +	}
-+
-+	/* update txin and txout */
-+	di->txin = ad;
-+	di->txout = TXD(di->txout + rot);
-+	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-+
-+	/* kick the chip */
-+	W_REG(&di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
 +}
 +
-+/*** 64 bits DMA non-inline functions ***/
-+
-+#ifdef BCMDMA64
-+
-+static bool
-+dma64_alloc(dma_info_t *di, uint direction)
++/* init the tx or rx descriptor */
++static INLINE void
++dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, ulong pa, uint outidx, uint32 *flags, uint32 bufcount)
 +{
-+	uint size;
-+	uint ddlen;
-+	uint32 alignbytes;
-+	void *va;
-+
-+	ddlen = sizeof (dma64dd_t);
++	uint32 bufaddr_low = pa + di->dataoffsetlow;
++	uint32 bufaddr_high = 0 + di->dataoffsethigh;
 +
-+	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
++	uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
 +
-+	alignbytes = di->dma64align;
++	W_SM(&ddring[outidx].addrlow, BUS_SWAP32(bufaddr_low));
++	W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(bufaddr_high));
++	W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
++	W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
++}
 +
-+	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, alignbytes))
-+		size += alignbytes;
++void
++dma_txinit(dma_info_t *di)
++{
++	DMA_TRACE(("%s: dma_txinit\n", di->name));
 +
++	di->txin = di->txout = 0;
++	di->txavail = di->ntxd - 1;
 +
-+	if (direction == DMA_TX) {
-+		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa)) == NULL) {
-+			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
-+			return FALSE;
-+		}
++	/* clear tx descriptor ring */
++	if (DMA64_ENAB(di)) {
++		BZERO_SM((void*)di->txd64, (di->ntxd * sizeof (dma64dd_t)));
++		W_REG(&di->d64txregs->control, XC_XE);
++		dma_ddtable_init(di, DMA_TX, di->txdpa);
++	} else {
++		BZERO_SM((void*)di->txd32, (di->ntxd * sizeof (dma32dd_t)));
++		W_REG(&di->d32txregs->control, XC_XE);
++		dma_ddtable_init(di, DMA_TX, di->txdpa);
++	}
++}
 +
-+		di->txd64 = (dma64dd_t*) ROUNDUP((uintptr)va, alignbytes);
-+		di->txdalign = (uint)((int8*)di->txd64 - (int8*)va);
-+		di->txdpa += di->txdalign;
-+		di->txdalloc = size;
-+		ASSERT(ISALIGNED((uintptr)di->txd64, alignbytes));
++bool
++dma_txenabled(dma_info_t *di)
++{
++	uint32 xc;
++	
++	/* If the chip is dead, it is not enabled :-) */
++	if (DMA64_ENAB(di)) {
++		xc = R_REG(&di->d64txregs->control);
++		return ((xc != 0xffffffff) && (xc & D64_XC_XE));
 +	} else {
-+		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa)) == NULL) {
-+			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
-+			return FALSE;
-+		}
-+		di->rxd64 = (dma64dd_t*) ROUNDUP((uintptr)va, alignbytes);
-+		di->rxdalign = (uint)((int8*)di->rxd64 - (int8*)va);
-+		di->rxdpa += di->rxdalign;
-+		di->rxdalloc = size;
-+		ASSERT(ISALIGNED((uintptr)di->rxd64, alignbytes));
++		xc = R_REG(&di->d32txregs->control);
++		return ((xc != 0xffffffff) && (xc & XC_XE));
 +	}
++}
 +
-+	return TRUE;
++void
++dma_txsuspend(dma_info_t *di)
++{
++	DMA_TRACE(("%s: dma_txsuspend\n", di->name));
++	if (DMA64_ENAB(di))
++		OR_REG(&di->d64txregs->control, D64_XC_SE);
++	else
++		OR_REG(&di->d32txregs->control, XC_SE);
 +}
 +
-+static void 
-+dma64_txreset(dma_info_t *di)
++void
++dma_txresume(dma_info_t *di)
 +{
-+	uint32 status;
++	DMA_TRACE(("%s: dma_txresume\n", di->name));
++	if (DMA64_ENAB(di))
++		AND_REG(&di->d64txregs->control, ~D64_XC_SE);
++	else
++		AND_REG(&di->d32txregs->control, ~XC_SE);
++}
 +
-+	/* suspend tx DMA first */
-+	W_REG(&di->d64txregs->control, D64_XC_SE);
-+	SPINWAIT((status = (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED &&
-+		 status != D64_XS0_XS_IDLE &&
-+		 status != D64_XS0_XS_STOPPED,
-+		 10000);
++bool
++dma_txsuspendedidle(dma_info_t *di)
++{
++	if (DMA64_ENAB(di))
++		return dma64_txsuspendedidle(di);
++	else
++		return dma32_txsuspendedidle(di);
++}
 +
-+	W_REG(&di->d64txregs->control, 0);
-+	SPINWAIT((status = (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED,
-+		 10000);
++bool
++dma_txsuspended(dma_info_t *di)
++{
++	if (DMA64_ENAB(di))
++		return ((R_REG(&di->d64txregs->control) & D64_XC_SE) == D64_XC_SE);
++	else
++		return ((R_REG(&di->d32txregs->control) & XC_SE) == XC_SE);
++}
 +
-+	if (status != D64_XS0_XS_DISABLED) {
-+		DMA_ERROR(("%s: dma_txreset: dma cannot be stopped\n", di->name));
-+	}
++bool
++dma_txstopped(dma_info_t *di)
++{
++	if (DMA64_ENAB(di))
++		return ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_STOPPED);
++	else
++		return ((R_REG(&di->d32txregs->status) & XS_XS_MASK) == XS_XS_STOPPED);
++}
 +
-+	/* wait for the last transaction to complete */
-+	OSL_DELAY(300);
++bool
++dma_rxstopped(dma_info_t *di)
++{
++	if (DMA64_ENAB(di))
++		return ((R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK) == D64_RS0_RS_STOPPED);
++	else
++		return ((R_REG(&di->d32rxregs->status) & RS_RS_MASK) == RS_RS_STOPPED);
++}
++
++void
++dma_fifoloopbackenable(dma_info_t *di)
++{
++	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
++	if (DMA64_ENAB(di))
++		OR_REG(&di->d64txregs->control, D64_XC_LE);
++	else
++		OR_REG(&di->d32txregs->control, XC_LE);
 +}
 +
-+static void 
-+dma64_rxreset(dma_info_t *di)
++void
++dma_rxinit(dma_info_t *di)
 +{
-+	uint32 status;
++	DMA_TRACE(("%s: dma_rxinit\n", di->name));
 +
-+	W_REG(&di->d64rxregs->control, 0);
-+	SPINWAIT((status = (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED,
-+		 10000);
++	di->rxin = di->rxout = 0;
 +
-+	if (status != D64_RS0_RS_DISABLED) {
-+		DMA_ERROR(("%s: dma_rxreset: dma cannot be stopped\n", di->name));
++	/* clear rx descriptor ring */
++	if (DMA64_ENAB(di)) {
++                BZERO_SM((void*)di->rxd64, (di->nrxd * sizeof (dma64dd_t)));
++		dma_rxenable(di);
++		dma_ddtable_init(di, DMA_RX, di->rxdpa);
++	} else {
++		BZERO_SM((void*)di->rxd32, (di->nrxd * sizeof (dma32dd_t)));
++		dma_rxenable(di);
++		dma_ddtable_init(di, DMA_RX, di->rxdpa);
 +	}
 +}
 +
-+static bool
-+dma64_txsuspendedidle(dma_info_t *di)
++void
++dma_rxenable(dma_info_t *di)
 +{
-+
-+	if (!(R_REG(&di->d64txregs->control) & D64_XC_SE))
-+		return 0;
-+	
-+	if ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_IDLE)
-+		return 1;
-+
-+	return 0;
++	DMA_TRACE(("%s: dma_rxenable\n", di->name));
++	if (DMA64_ENAB(di))
++		W_REG(&di->d64rxregs->control, ((di->rxoffset << D64_RC_RO_SHIFT) | D64_RC_RE));
++	else
++		W_REG(&di->d32rxregs->control, ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
 +}
 +
-+/*
-+ * supports full 32bit dma engine buffer addressing so
-+ * dma buffers can cross 4 Kbyte page boundaries.
-+ */
-+static int
-+dma64_txfast(dma_info_t *di, void *p0, uint32 coreflags)
++bool
++dma_rxenabled(dma_info_t *di)
 +{
-+	void *p, *next;
-+	uchar *data;
-+	uint len;
-+	uint txout;
-+	uint32 flags;
-+	uint32 pa;	
-+
-+	DMA_TRACE(("%s: dma_txfast\n", di->name));
-+
-+	txout = di->txout;
-+	flags = 0;
-+
-+	/*
-+	 * Walk the chain of packet buffers
-+	 * allocating and initializing transmit descriptor entries.
-+	 */
-+	for (p = p0; p; p = next) {
-+		data = PKTDATA(di->osh, p);
-+		len = PKTLEN(di->osh, p);
-+		next = PKTNEXT(di->osh, p);
-+
-+		/* return nonzero if out of tx descriptors */
-+		if (NEXTTXD(txout) == di->txin)
-+			goto outoftxd;
-+
-+		if (len == 0)
-+			continue;
-+
-+		/* get physical address of buffer start */
-+		pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);
-+
-+		flags = coreflags;
-+		
-+		if (p == p0)
-+			flags |= D64_CTRL1_SOF;
-+		if (next == NULL)
-+			flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
-+		if (txout == (di->ntxd - 1))
-+			flags |= D64_CTRL1_EOT;
-+
-+		dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
-+
-+		ASSERT(di->txp[txout] == NULL);
++	uint32 rc;
 +
-+		txout = NEXTTXD(txout);
++	if (DMA64_ENAB(di)) { 
++		rc = R_REG(&di->d64rxregs->control);
++		return ((rc != 0xffffffff) && (rc & D64_RC_RE));
++	} else {
++		rc = R_REG(&di->d32rxregs->control);
++		return ((rc != 0xffffffff) && (rc & RC_RE));
 +	}
++}
 +
-+	/* if last txd eof not set, fix it */
-+	if (!(flags & D64_CTRL1_EOF))
-+		W_SM(&di->txd64[PREVTXD(txout)].ctrl1, BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
-+
-+	/* save the packet */
-+	di->txp[PREVTXD(txout)] = p0;
-+
-+	/* bump the tx descriptor index */
-+	di->txout = txout;
-+
-+	/* kick the chip */
-+	W_REG(&di->d64txregs->ptr, I2B(txout, dma64dd_t));
-+
-+	/* tx flow control */
-+	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-+
-+	return (0);
 +
-+outoftxd:
-+	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
-+	PKTFREE(di->osh, p0, TRUE);
-+	di->txavail = 0;
-+	di->hnddma.txnobuf++;
-+	return (-1);
++/* !! tx entry routine */
++int
++dma_txfast(dma_info_t *di, void *p0, uint32 coreflags)
++{
++	if (DMA64_ENAB(di)) { 
++		return dma64_txfast(di, p0, coreflags);
++	} else {
++		return dma32_txfast(di, p0, coreflags);
++	}
 +}
 +
-+static void*
-+dma64_getnexttxp(dma_info_t *di, bool forceall)
++/* !! rx entry routine, returns a pointer to the next frame received, or NULL if there are no more */
++void*
++dma_rx(dma_info_t *di)
 +{
-+	uint start, end, i;
-+	void *txp;
-+
-+	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));
-+
-+	txp = NULL;
++	void *p;
++	uint len;
++	int skiplen = 0;
 +
-+	start = di->txin;
-+	if (forceall)
-+		end = di->txout;
-+	else
-+		end = B2I(R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t);
++	while ((p = dma_getnextrxp(di, FALSE))) {
++		/* skip giant packets which span multiple rx descriptors */
++		if (skiplen > 0) {
++			skiplen -= di->rxbufsize;
++			if (skiplen < 0)
++				skiplen = 0;
++			PKTFREE(di->osh, p, FALSE);
++			continue;
++		}
 +
-+	if ((start == 0) && (end > di->txout))
-+		goto bogus;
++		len = ltoh16(*(uint16*)(PKTDATA(di->osh, p)));
++		DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
 +
-+	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
-+		DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) - di->dataoffsetlow),
-+			  (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) & D64_CTRL2_BC_MASK), DMA_TX, di->txp[i]);
++		/* bad frame length check */
++		if (len > (di->rxbufsize - di->rxoffset)) {
++			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
++			if (len > 0)
++				skiplen = len - (di->rxbufsize - di->rxoffset);
++			PKTFREE(di->osh, p, FALSE);
++			di->hnddma.rxgiants++;
++			continue;
++		}
 +
-+		W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
-+		W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
++		/* set actual length */
++		PKTSETLEN(di->osh, p, (di->rxoffset + len));
 +
-+		txp = di->txp[i];
-+		di->txp[i] = NULL;
++		break;
 +	}
 +
-+	di->txin = i;
-+
-+	/* tx flow control */
-+	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-+
-+	return (txp);
-+
-+bogus:
-+/*
-+	DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
-+		start, end, di->txout, forceall));
-+*/
-+	return (NULL);
++	return (p);
 +}
 +
-+static void *
-+dma64_getnextrxp(dma_info_t *di, bool forceall)
++/* post receive buffers */
++void
++dma_rxfill(dma_info_t *di)
 +{
++	void *p;
++	uint rxin, rxout;
++	uint32 ctrl;
++	uint n;
 +	uint i;
-+	void *rxp;
++	uint32 pa;
++	uint rxbufsize;
 +
-+	/* if forcing, dma engine must be disabled */
-+	ASSERT(!forceall || !dma_rxenabled(di));
++	/*
++	 * Determine how many receive buffers we're lacking
++	 * from the full complement, allocate, initialize,
++	 * and post them, then update the chip rx lastdscr.
++	 */
 +
-+	i = di->rxin;
++	rxin = di->rxin;
++	rxout = di->rxout;
++	rxbufsize = di->rxbufsize;
 +
-+	/* return if no packets posted */
-+	if (i == di->rxout)
-+		return (NULL);
++	n = di->nrxpost - NRXDACTIVE(rxin, rxout);
 +
-+	/* ignore curr if forceall */
-+	if (!forceall && (i == B2I(R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK, dma64dd_t)))
-+		return (NULL);
++	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
 +
-+	/* get the packet pointer that corresponds to the rx descriptor */
-+	rxp = di->rxp[i];
-+	ASSERT(rxp);
-+	di->rxp[i] = NULL;
++	for (i = 0; i < n; i++) {
++		if ((p = PKTGET(di->osh, rxbufsize, FALSE)) == NULL) {
++			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
++			di->hnddma.rxnobuf++;
++			break;
++		}
 +
-+	/* clear this packet from the descriptor ring */
-+	DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) - di->dataoffsetlow),
-+		  di->rxbufsize, DMA_RX, rxp);
++		/* Do a cached write instead of uncached write since DMA_MAP
++		 * will flush the cache. */
++		*(uint32*)(PKTDATA(di->osh, p)) = 0;
++
++		pa = (uint32) DMA_MAP(di->osh, PKTDATA(di->osh, p), rxbufsize, DMA_RX, p);
++		ASSERT(ISALIGNED(pa, 4));
++
++		/* save the free packet pointer */
++		ASSERT(di->rxp[rxout] == NULL);
++		di->rxp[rxout] = p;
++
++		if (DMA64_ENAB(di)) {
++			/* prep the descriptor control value */
++			if (rxout == (di->nrxd - 1))
++				ctrl = CTRL_EOT;
 +
-+	W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
-+	W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
++			dma64_dd_upd(di, di->rxd64, pa, rxout, &ctrl, rxbufsize);
++		} else {
++			/* prep the descriptor control value */
++			ctrl = rxbufsize;
++			if (rxout == (di->nrxd - 1))
++				ctrl |= CTRL_EOT;
++			dma32_dd_upd(di, di->rxd32, pa, rxout, &ctrl);
++		}
 +
-+	di->rxin = NEXTRXD(i);
++		rxout = NEXTRXD(rxout);
++	}
 +
-+	return (rxp);
++	di->rxout = rxout;
++
++	/* update the chip lastdscr pointer */
++	if (DMA64_ENAB(di)) {
++		W_REG(&di->d64rxregs->ptr, I2B(rxout, dma64dd_t));
++	} else {
++		W_REG(&di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
++	}
 +}
 +
-+static void
-+dma64_txrotate(di_t *di)
++void
++dma_txreclaim(dma_info_t *di, bool forceall)
 +{
-+	uint ad;
-+	uint nactive;
-+	uint rot;
-+	uint old, new;
-+	uint32 w;
-+	uint first, last;
++	void *p;
 +
-+	ASSERT(dma_txsuspendedidle(di));
++	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));
 +
-+	nactive = dma_txactive(di);
-+	ad = B2I((R_REG(&di->d64txregs->status1) & D64_XS1_AD_MASK), dma64dd_t);
-+	rot = TXD(ad - di->txin);
++	while ((p = dma_getnexttxp(di, forceall)))
++		PKTFREE(di->osh, p, TRUE);
++}
 +
-+	ASSERT(rot < di->ntxd);
++/*
++ * Reclaim next completed txd (txds if using chained buffers) and
++ * return associated packet.
++ * If 'force' is true, reclaim txd(s) and return associated packet
++ * regardless of the value of the hardware "curr" pointer.
++ */
++void*
++dma_getnexttxp(dma_info_t *di, bool forceall)
++{
++	if (DMA64_ENAB(di)) {
++		return dma64_getnexttxp(di, forceall);
++	} else {
++		return dma32_getnexttxp(di, forceall);
++	}
++}
++	
++/* like getnexttxp but no reclaim */
++void*
++dma_peeknexttxp(dma_info_t *di)
++{
++	uint end, i;
 +
-+	/* full-ring case is a lot harder - don't worry about this */
-+	if (rot >= (di->ntxd - nactive)) {
-+		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
-+		return;
++	if (DMA64_ENAB(di)) {
++		end = B2I(R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t);
++	} else {
++		end = B2I(R_REG(&di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
 +	}
 +
-+	first = di->txin;
-+	last = PREVTXD(di->txout);
++	for (i = di->txin; i != end; i = NEXTTXD(i))
++		if (di->txp[i])
++			return (di->txp[i]);
 +
-+	/* move entries starting at last and moving backwards to first */
-+	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
-+		new = TXD(old + rot);
++	return (NULL);
++}
 +
-+		/*
-+		 * Move the tx dma descriptor.
-+		 * EOT is set only in the last entry in the ring.
-+		 */
-+		w = R_SM(&di->txd64[old].ctrl1) & ~D64_CTRL1_EOT;
-+		if (new == (di->ntxd - 1))
-+			w |= D64_CTRL1_EOT;
-+		W_SM(&di->txd64[new].ctrl1, w);
++/*
++ * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
++ */
++void
++dma_txrotate(di_t *di)
++{
++	if (DMA64_ENAB(di)) {
++		dma64_txrotate(di);
++	} else {
++		dma32_txrotate(di);
++	}
++}
 +
-+		w = R_SM(&di->txd64[old].ctrl2);
-+		W_SM(&di->txd64[new].ctrl2, w);
++void
++dma_rxreclaim(dma_info_t *di)
++{
++	void *p;
 +
-+		W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
-+		W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));
++	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
 +
-+		/* zap the old tx dma descriptor address field */
-+		W_SM(&di->txd64[old].addrlow, 0xdeadbeef);
-+		W_SM(&di->txd64[old].addrhigh, 0xdeadbeef);
++	while ((p = dma_getnextrxp(di, TRUE)))
++		PKTFREE(di->osh, p, FALSE);
++}
 +
-+		/* move the corresponding txp[] entry */
-+		ASSERT(di->txp[new] == NULL);
-+		di->txp[new] = di->txp[old];
-+		di->txp[old] = NULL;
++void *
++dma_getnextrxp(dma_info_t *di, bool forceall)
++{
++	if (DMA64_ENAB(di)) {
++		return dma64_getnextrxp(di, forceall);
++	} else {
++		return dma32_getnextrxp(di, forceall);
 +	}
-+
-+	/* update txin and txout */
-+	di->txin = ad;
-+	di->txout = TXD(di->txout + rot);
-+	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
-+
-+	/* kick the chip */
-+	W_REG(&di->d64txregs->ptr, I2B(di->txout, dma64dd_t));
 +}
 +
-+#endif
-+
-diff -Nur linux-2.4.32/drivers/net/hnd/linux_osl.c linux-2.4.32-brcm/drivers/net/hnd/linux_osl.c
---- linux-2.4.32/drivers/net/hnd/linux_osl.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/net/hnd/linux_osl.c	2005-12-16 23:39:11.292858500 +0100
-@@ -0,0 +1,708 @@
-+/*
-+ * Linux OS Independent Layer
-+ *
-+ * Copyright 2005, Broadcom Corporation
-+ * All Rights Reserved.
-+ * 
-+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
-+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
-+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
-+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
-+ *
-+ * $Id$
-+ */
++uintptr
++dma_getvar(dma_info_t *di, char *name)
++{
++	if (!strcmp(name, "&txavail"))
++		return ((uintptr) &di->txavail);
++	else {
++		ASSERT(0);
++	}
++	return (0);
++}
 +
-+#define LINUX_OSL
++void
++dma_txblock(dma_info_t *di)
++{
++	di->txavail = 0;
++}
 +
-+#include <typedefs.h>
-+#include <bcmendian.h>
-+#include <linux/module.h>
-+#include <linuxver.h>
-+#include <osl.h>
-+#include <bcmutils.h>
-+#include <linux/delay.h>
-+#ifdef mips
-+#include <asm/paccess.h>
-+#endif
-+#include <pcicfg.h>
++void
++dma_txunblock(dma_info_t *di)
++{
++	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
++}
 +
-+#define PCI_CFG_RETRY 		10	
++uint
++dma_txactive(dma_info_t *di)
++{
++	return (NTXDACTIVE(di->txin, di->txout));
++}
++	
++void
++dma_rxpiomode(dma32regs_t *regs)
++{
++	W_REG(&regs->control, RC_FM);
++}
 +
-+#define OS_HANDLE_MAGIC		0x1234abcd
-+#define BCM_MEM_FILENAME_LEN 	24
++void
++dma_txpioloopback(dma32regs_t *regs)
++{
++	OR_REG(&regs->control, XC_LE);
++}
 +
-+typedef struct bcm_mem_link {
-+	struct bcm_mem_link *prev;
-+	struct bcm_mem_link *next;
-+	uint	size;
-+	int	line;
-+	char	file[BCM_MEM_FILENAME_LEN];
-+} bcm_mem_link_t;
 +
-+struct os_handle {
-+	uint magic;
-+	void *pdev;
-+	uint malloced;
-+	uint failed;
-+	bcm_mem_link_t *dbgmem_list;
-+};
 +
-+static int16 linuxbcmerrormap[] =  \
-+{	0, 			/* 0 */
-+	-EINVAL,		/* BCME_ERROR */
-+	-EINVAL,		/* BCME_BADARG*/
-+	-EINVAL,		/* BCME_BADOPTION*/
-+	-EINVAL,		/* BCME_NOTUP */
-+	-EINVAL,		/* BCME_NOTDOWN */
-+	-EINVAL,		/* BCME_NOTAP */
-+	-EINVAL,		/* BCME_NOTSTA */
-+	-EINVAL,		/* BCME_BADKEYIDX */
-+	-EINVAL,		/* BCME_RADIOOFF */
-+	-EINVAL,		/* BCME_NOTBANDLOCKED */
-+	-EINVAL, 		/* BCME_NOCLK */
-+	-EINVAL, 		/* BCME_BADRATESET */
-+	-EINVAL, 		/* BCME_BADBAND */
-+	-E2BIG,			/* BCME_BUFTOOSHORT */
-+	-E2BIG,			/* BCME_BUFTOOLONG */
-+	-EBUSY, 		/* BCME_BUSY */
-+	-EINVAL, 		/* BCME_NOTASSOCIATED */
-+	-EINVAL, 		/* BCME_BADSSIDLEN */
-+	-EINVAL, 		/* BCME_OUTOFRANGECHAN */
-+	-EINVAL, 		/* BCME_BADCHAN */
-+	-EFAULT, 		/* BCME_BADADDR */
-+	-ENOMEM, 		/* BCME_NORESOURCE */
-+	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
-+	-EMSGSIZE,		/* BCME_BADLENGTH */
-+	-EINVAL,		/* BCME_NOTREADY */
-+	-EPERM,			/* BCME_NOTPERMITTED */
-+	-ENOMEM, 		/* BCME_NOMEM */
-+	-EINVAL, 		/* BCME_ASSOCIATED */
-+	-ERANGE, 		/* BCME_RANGE */
-+	-EINVAL 		/* BCME_NOTFOUND */
-+}; 
 +
-+/* translate bcmerrors into linux errors*/
-+int 
-+osl_error(int bcmerror)
++/*** 32 bits DMA non-inline functions ***/
++static bool
++dma32_alloc(dma_info_t *di, uint direction)
 +{
-+	int abs_bcmerror;
-+	int array_size = ARRAYSIZE(linuxbcmerrormap); 
-+	
-+	abs_bcmerror = ABS(bcmerror);	
++	uint size;
++	uint ddlen;
++	void *va;
 +
-+	if (bcmerror > 0)
-+		abs_bcmerror = 0;
++	ddlen = sizeof (dma32dd_t);
 +
-+ 	else if (abs_bcmerror >= array_size)
-+		abs_bcmerror = BCME_ERROR;
++	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
 +
-+	return linuxbcmerrormap[abs_bcmerror];
++	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, D32RINGALIGN))
++		size += D32RINGALIGN;
++
++
++	if (direction == DMA_TX) {
++		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa)) == NULL) {
++			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
++			return FALSE;
++		}
++
++		di->txd32 = (dma32dd_t*) ROUNDUP((uintptr)va, D32RINGALIGN);
++		di->txdalign = (uint)((int8*)di->txd32 - (int8*)va);
++		di->txdpa += di->txdalign;
++		di->txdalloc = size;
++		ASSERT(ISALIGNED((uintptr)di->txd32, D32RINGALIGN));
++	} else {
++		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa)) == NULL) {
++			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
++			return FALSE;
++		}
++		di->rxd32 = (dma32dd_t*) ROUNDUP((uintptr)va, D32RINGALIGN);
++		di->rxdalign = (uint)((int8*)di->rxd32 - (int8*)va);
++		di->rxdpa += di->rxdalign;
++		di->rxdalloc = size;
++		ASSERT(ISALIGNED((uintptr)di->rxd32, D32RINGALIGN));
++	}
++
++	return TRUE;
 +}
 +
-+osl_t *
-+osl_attach(void *pdev)
++static void 
++dma32_txreset(dma_info_t *di)
 +{
-+	osl_t *osh;
++	uint32 status;
 +
-+	osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
-+	ASSERT(osh);
++	/* suspend tx DMA first */
++	W_REG(&di->d32txregs->control, XC_SE);
++	SPINWAIT((status = (R_REG(&di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED &&
++		 status != XS_XS_IDLE &&
++		 status != XS_XS_STOPPED,
++		 10000);
 +
-+	/* 
-+	 * check the cases where 
-+	 * 1.Error code Added to bcmerror table, but forgot to add it to the OS 
-+	 * dependent error code
-+	 * 2. Error code is added to the bcmerror table, but forgot to add the 
-+	 * corresponding errorstring(dummy call to bcmerrorstr)
-+	 */
-+	bcmerrorstr(0);
-+	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
++	W_REG(&di->d32txregs->control, 0);
++	SPINWAIT((status = (R_REG(&di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED,
++		 10000);
 +
-+	osh->magic = OS_HANDLE_MAGIC;
-+	osh->malloced = 0;
-+	osh->failed = 0;
-+	osh->dbgmem_list = NULL;
-+	osh->pdev = pdev;
++	if (status != XS_XS_DISABLED) {
++		DMA_ERROR(("%s: dma_txreset: dma cannot be stopped\n", di->name));
++	}
 +
-+	return osh;
++	/* wait for the last transaction to complete */
++	OSL_DELAY(300);
 +}
 +
-+void
-+osl_detach(osl_t *osh)
++static void 
++dma32_rxreset(dma_info_t *di)
 +{
-+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC));
-+	kfree(osh);
++	uint32 status;
++
++	W_REG(&di->d32rxregs->control, 0);
++	SPINWAIT((status = (R_REG(&di->d32rxregs->status) & RS_RS_MASK)) != RS_RS_DISABLED,
++		 10000);
++
++	if (status != RS_RS_DISABLED) {
++		DMA_ERROR(("%s: dma_rxreset: dma cannot be stopped\n", di->name));
++	}
 +}
 +
-+void*
-+osl_pktget(osl_t *osh, uint len, bool send)
++static bool
++dma32_txsuspendedidle(dma_info_t *di)
 +{
-+	struct sk_buff *skb;
++	if (!(R_REG(&di->d32txregs->control) & XC_SE))
++		return 0;
++	
++	if ((R_REG(&di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
++		return 0;
 +
-+	if ((skb = dev_alloc_skb(len)) == NULL)
-+		return (NULL);
++	OSL_DELAY(2);
++	return ((R_REG(&di->d32txregs->status) & XS_XS_MASK) == XS_XS_IDLE);
++}
 +
-+	skb_put(skb, len);
++/*
++ * supports full 32bit dma engine buffer addressing so
++ * dma buffers can cross 4 Kbyte page boundaries.
++ */
++static int
++dma32_txfast(dma_info_t *di, void *p0, uint32 coreflags)
++{
++	void *p, *next;
++	uchar *data;
++	uint len;
++	uint txout;
++	uint32 ctrl;
++	uint32 pa;	
 +
-+	/* ensure the cookie field is cleared */ 
-+	PKTSETCOOKIE(skb, NULL);
++	DMA_TRACE(("%s: dma_txfast\n", di->name));
 +
-+	return ((void*) skb);
-+}
++	txout = di->txout;
++	ctrl = 0;
 +
-+void
-+osl_pktfree(void *p)
-+{
-+	struct sk_buff *skb, *nskb;
++	/*
++	 * Walk the chain of packet buffers
++	 * allocating and initializing transmit descriptor entries.
++	 */
++	for (p = p0; p; p = next) {
++		data = PKTDATA(di->osh, p);
++		len = PKTLEN(di->osh, p);
++		next = PKTNEXT(di->osh, p);
 +
-+	skb = (struct sk_buff*) p;
++		/* return nonzero if out of tx descriptors */
++		if (NEXTTXD(txout) == di->txin)
++			goto outoftxd;
 +
-+	/* perversion: we use skb->next to chain multi-skb packets */
-+	while (skb) {
-+		nskb = skb->next;
-+		skb->next = NULL;
-+		if (skb->destructor) {
-+			/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if destructor exists */
-+			dev_kfree_skb_any(skb);
++		if (len == 0)
++			continue;
++
++		/* get physical address of buffer start */
++		pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);
++
++		/* build the descriptor control value */
++		ctrl = len & CTRL_BC_MASK;
++
++		ctrl |= coreflags;
++		
++		if (p == p0)
++			ctrl |= CTRL_SOF;
++		if (next == NULL)
++			ctrl |= (CTRL_IOC | CTRL_EOF);
++		if (txout == (di->ntxd - 1))
++			ctrl |= CTRL_EOT;
++
++		if (DMA64_ENAB(di)) {
++			dma64_dd_upd(di, di->txd64, pa, txout, &ctrl, len);
 +		} else {
-+			/* can free immediately (even in_irq()) if destructor does not exist */
-+			dev_kfree_skb(skb);
++			dma32_dd_upd(di, di->txd32, pa, txout, &ctrl);
 +		}
-+		skb = nskb;
++
++		ASSERT(di->txp[txout] == NULL);
++
++		txout = NEXTTXD(txout);
 +	}
-+}
 +
-+uint32
-+osl_pci_read_config(osl_t *osh, uint offset, uint size)
-+{
-+	uint val;
-+	uint retry=PCI_CFG_RETRY;	 
++	/* if last txd eof not set, fix it */
++	if (!(ctrl & CTRL_EOF))
++		W_SM(&di->txd32[PREVTXD(txout)].ctrl, BUS_SWAP32(ctrl | CTRL_IOC | CTRL_EOF));
 +
-+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++	/* save the packet */
++	di->txp[PREVTXD(txout)] = p0;
 +
-+	/* only 4byte access supported */
-+	ASSERT(size == 4);
++	/* bump the tx descriptor index */
++	di->txout = txout;
 +
-+	do {
-+		pci_read_config_dword(osh->pdev, offset, &val);
-+		if (val != 0xffffffff)
-+			break;
-+	} while (retry--);
++	/* kick the chip */
++	if (DMA64_ENAB(di)) {
++		W_REG(&di->d64txregs->ptr, I2B(txout, dma64dd_t));
++	} else {
++		W_REG(&di->d32txregs->ptr, I2B(txout, dma32dd_t));
++	}
 +
++	/* tx flow control */
++	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
 +
-+	return (val);
++	return (0);
++
++ outoftxd:
++	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
++	PKTFREE(di->osh, p0, TRUE);
++	di->txavail = 0;
++	di->hnddma.txnobuf++;
++	return (-1);
 +}
 +
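dma32_txfast() keeps one ring slot as a sentinel: it bails out when NEXTTXD(txout) would collide with txin, and recomputes txavail as ntxd - NTXDACTIVE(txin, txout) - 1 after posting. The NEXTTXD/NTXDACTIVE/PREVTXD macros are not part of this hunk; the sketch below assumes the usual masked modular arithmetic over a power-of-two ring size.

#include <stdio.h>

#define NTXD 64u                            /* hypothetical power-of-two ring size */
#define NEXTTXD(i)       (((i) + 1) & (NTXD - 1))
#define PREVTXD(i)       (((i) - 1) & (NTXD - 1))
#define NTXDACTIVE(h, t) (((t) - (h)) & (NTXD - 1))

int main(void)
{
	unsigned txin = 60, txout = 3;          /* producer has wrapped past the end   */
	unsigned active = NTXDACTIVE(txin, txout);
	unsigned avail  = NTXD - active - 1;    /* one slot always kept free           */

	printf("active=%u avail=%u next=%u prev=%u\n",
	       active, avail, NEXTTXD(txout), PREVTXD(txout));
	return 0;
}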
-+void
-+osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
++static void*
++dma32_getnexttxp(dma_info_t *di, bool forceall)
 +{
-+	uint retry=PCI_CFG_RETRY;	 
++	uint start, end, i;
++	void *txp;
 +
-+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));
 +
-+	/* only 4byte access supported */
-+	ASSERT(size == 4);
++	txp = NULL;
 +
-+	do {
-+		pci_write_config_dword(osh->pdev, offset, val);
-+		if (offset!=PCI_BAR0_WIN)
-+			break;
-+		if (osl_pci_read_config(osh,offset,size) == val) 
-+			break;
-+	} while (retry--);
++	start = di->txin;
++	if (forceall)
++		end = di->txout;
++	else
++		end = B2I(R_REG(&di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
 +
-+}
++	if ((start == 0) && (end > di->txout))
++		goto bogus;
 +
-+/* return bus # for the pci device pointed by osh->pdev */
-+uint
-+osl_pci_bus(osl_t *osh)
-+{
-+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
++	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
++		DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow),
++			  (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK), DMA_TX, di->txp[i]);
 +
-+	return ((struct pci_dev *)osh->pdev)->bus->number;
-+}
++		W_SM(&di->txd32[i].addr, 0xdeadbeef);
++		txp = di->txp[i];
++		di->txp[i] = NULL;
++	}
 +
-+/* return slot # for the pci device pointed by osh->pdev */
-+uint
-+osl_pci_slot(osl_t *osh)
-+{
-+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
++	di->txin = i;
 +
-+	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
-+}
++	/* tx flow control */
++	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
 +
-+static void
-+osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
-+{
-+}
++	return (txp);
 +
-+void
-+osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
-+{
-+	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
++bogus:
++/*
++	DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
++		start, end, di->txout, forceall));
++*/
++	return (NULL);
 +}
 +
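The reclaim loop above converts the XS_CD field of the status register, a byte offset into the descriptor ring, into a ring index before walking from txin. B2I()/I2B() are defined elsewhere in hnddma.c, so the divide/multiply below is an assumption about their shape rather than the driver's actual macros.

#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t ctrl, addr; } dma32dd_t;     /* 8-byte 32-bit descriptor */

#define B2I(bytes, type) ((unsigned)((bytes) / sizeof(type)))
#define I2B(index, type) ((unsigned)((index) * sizeof(type)))

int main(void)
{
	unsigned status_cd = 0x140;                        /* hypothetical XS_CD value */
	unsigned end = B2I(status_cd, dma32dd_t);          /* index the hardware is at */

	printf("reclaim up to index %u (byte offset 0x%x)\n", end, I2B(end, dma32dd_t));
	return 0;
}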
-+void
-+osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
++static void *
++dma32_getnextrxp(dma_info_t *di, bool forceall)
 +{
-+	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
-+}
++	uint i;
++	void *rxp;
 +
++	/* if forcing, dma engine must be disabled */
++	ASSERT(!forceall || !dma_rxenabled(di));
 +
-+#ifdef BCMDBG_MEM
++	i = di->rxin;
 +
-+void*
-+osl_debug_malloc(osl_t *osh, uint size, int line, char* file)
-+{
-+	bcm_mem_link_t *p;
-+	char* basename;
++	/* return if no packets posted */
++	if (i == di->rxout)
++		return (NULL);
 +
-+	ASSERT(size);
-+	
-+	if ((p = (bcm_mem_link_t*)osl_malloc(osh, sizeof(bcm_mem_link_t) + size)) == NULL)
++	/* ignore curr if forceall */
++	if (!forceall && (i == B2I(R_REG(&di->d32rxregs->status) & RS_CD_MASK, dma32dd_t)))
 +		return (NULL);
-+	
-+	p->size = size;
-+	p->line = line;
-+	
-+	basename = strrchr(file, '/');
-+	/* skip the '/' */
-+	if (basename)
-+		basename++;
 +
-+	if (!basename)
-+		basename = file;
-+	
-+	strncpy(p->file, basename, BCM_MEM_FILENAME_LEN);
-+	p->file[BCM_MEM_FILENAME_LEN - 1] = '\0';
++	/* get the packet pointer that corresponds to the rx descriptor */
++	rxp = di->rxp[i];
++	ASSERT(rxp);
++	di->rxp[i] = NULL;
 +
-+	/* link this block */
-+	p->prev = NULL;
-+	p->next = osh->dbgmem_list;
-+	if (p->next)
-+		p->next->prev = p;
-+	osh->dbgmem_list = p;
++	/* clear this packet from the descriptor ring */
++	DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow),
++		  di->rxbufsize, DMA_RX, rxp);
++	W_SM(&di->rxd32[i].addr, 0xdeadbeef);
 +
-+	return p + 1;
++	di->rxin = NEXTRXD(i);
++
++	return (rxp);
 +}
 +
-+void
-+osl_debug_mfree(osl_t *osh, void *addr, uint size, int line, char* file)
++static void
++dma32_txrotate(di_t *di)
 +{
-+	bcm_mem_link_t *p = (bcm_mem_link_t *)((int8*)addr - sizeof(bcm_mem_link_t));
-+	
-+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++	uint ad;
++	uint nactive;
++	uint rot;
++	uint old, new;
++	uint32 w;
++	uint first, last;
 +
-+	if (p->size == 0) {
-+		printk("osl_debug_mfree: double free on addr 0x%x size %d at line %d file %s\n", 
-+			(uint)addr, size, line, file);
-+		ASSERT(p->size);
++	ASSERT(dma_txsuspendedidle(di));
++
++	nactive = dma_txactive(di);
++	ad = B2I(((R_REG(&di->d32txregs->status) & XS_AD_MASK) >> XS_AD_SHIFT), dma32dd_t);
++	rot = TXD(ad - di->txin);
++
++	ASSERT(rot < di->ntxd);
++
++	/* full-ring case is a lot harder - don't worry about this */
++	if (rot >= (di->ntxd - nactive)) {
++		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
 +		return;
 +	}
 +
-+	if (p->size != size) {
-+		printk("osl_debug_mfree: dealloc size %d does not match alloc size %d on addr 0x%x at line %d file %s\n",
-+		       size, p->size, (uint)addr, line, file);
-+		ASSERT(p->size == size);
-+		return;
++	first = di->txin;
++	last = PREVTXD(di->txout);
++
++	/* move entries starting at last and moving backwards to first */
++	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
++		new = TXD(old + rot);
++
++		/*
++		 * Move the tx dma descriptor.
++		 * EOT is set only in the last entry in the ring.
++		 */
++		w = R_SM(&di->txd32[old].ctrl) & ~CTRL_EOT;
++		if (new == (di->ntxd - 1))
++			w |= CTRL_EOT;
++		W_SM(&di->txd32[new].ctrl, w);
++		W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));
++
++		/* zap the old tx dma descriptor address field */
++		W_SM(&di->txd32[old].addr, 0xdeadbeef);
++
++		/* move the corresponding txp[] entry */
++		ASSERT(di->txp[new] == NULL);
++		di->txp[new] = di->txp[old];
++		di->txp[old] = NULL;
 +	}
 +
-+	/* unlink this block */
-+	if (p->prev)
-+		p->prev->next = p->next;
-+	if (p->next)
-+		p->next->prev = p->prev;
-+	if (osh->dbgmem_list == p)
-+		osh->dbgmem_list = p->next;
-+	p->next = p->prev = NULL;
++	/* update txin and txout */
++	di->txin = ad;
++	di->txout = TXD(di->txout + rot);
++	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
 +
-+	osl_mfree(osh, p, size + sizeof(bcm_mem_link_t));
++	/* kick the chip */
++	W_REG(&di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
 +}
 +
-+char*
-+osl_debug_memdump(osl_t *osh, char *buf, uint sz)
++/*** 64 bits DMA non-inline functions ***/
++
++#ifdef BCMDMA64
++
++static bool
++dma64_alloc(dma_info_t *di, uint direction)
 +{
-+	bcm_mem_link_t *p;
-+	char *obuf;
-+	
-+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-+	obuf = buf;
++	uint size;
++	uint ddlen;
++	uint32 alignbytes;
++	void *va;
 +
-+	buf += sprintf(buf, "   Address\tSize\tFile:line\n");
-+	for (p = osh->dbgmem_list; p && ((buf - obuf) < (sz - 128)); p = p->next)
-+		buf += sprintf(buf, "0x%08x\t%5d\t%s:%d\n",
-+			(int)p + sizeof(bcm_mem_link_t), p->size, p->file, p->line);
++	ddlen = sizeof (dma64dd_t);
 +
-+	return (obuf);
-+}
++	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
 +
-+#endif	/* BCMDBG_MEM */
++	alignbytes = di->dma64align;
 +
-+void*
-+osl_malloc(osl_t *osh, uint size)
-+{
-+	void *addr;
-+	
-+	/* only ASSERT if osh is defined */
-+	if (osh)
-+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
++	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, alignbytes))
++		size += alignbytes;
 +
-+	if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
-+		if(osh)
-+			osh->failed++;
-+		return (NULL);
-+	}
-+	if (osh)
-+		osh->malloced += size;
-+	
-+	return (addr);
-+}
 +
-+void
-+osl_mfree(osl_t *osh, void *addr, uint size)
-+{
-+	if (osh) {
-+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
-+		osh->malloced -= size;
++	if (direction == DMA_TX) {
++		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa)) == NULL) {
++			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
++			return FALSE;
++		}
++
++		di->txd64 = (dma64dd_t*) ROUNDUP((uintptr)va, alignbytes);
++		di->txdalign = (uint)((int8*)di->txd64 - (int8*)va);
++		di->txdpa += di->txdalign;
++		di->txdalloc = size;
++		ASSERT(ISALIGNED((uintptr)di->txd64, alignbytes));
++	} else {
++		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa)) == NULL) {
++			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
++			return FALSE;
++		}
++		di->rxd64 = (dma64dd_t*) ROUNDUP((uintptr)va, alignbytes);
++		di->rxdalign = (uint)((int8*)di->rxd64 - (int8*)va);
++		di->rxdpa += di->rxdalign;
++		di->rxdalloc = size;
++		ASSERT(ISALIGNED((uintptr)di->rxd64, alignbytes));
 +	}
-+	kfree(addr);
-+}
 +
-+uint
-+osl_malloced(osl_t *osh)
-+{
-+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-+	return (osh->malloced);
++	return TRUE;
 +}
 +
-+uint osl_malloc_failed(osl_t *osh)
++static void 
++dma64_txreset(dma_info_t *di)
 +{
-+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-+	return (osh->failed);
-+}
++	uint32 status;
++
++	/* suspend tx DMA first */
++	W_REG(&di->d64txregs->control, D64_XC_SE);
++	SPINWAIT((status = (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED &&
++		 status != D64_XS0_XS_IDLE &&
++		 status != D64_XS0_XS_STOPPED,
++		 10000);
++
++	W_REG(&di->d64txregs->control, 0);
++	SPINWAIT((status = (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED,
++		 10000);
 +
-+void*
-+osl_dma_alloc_consistent(osl_t *osh, uint size, ulong *pap)
-+{
-+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++	if (status != D64_XS0_XS_DISABLED) {
++		DMA_ERROR(("%s: dma_txreset: dma cannot be stopped\n", di->name));
++	}
 +
-+	return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
++	/* wait for the last transaction to complete */
++	OSL_DELAY(300);
 +}
 +
-+void
-+osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
++static void 
++dma64_rxreset(dma_info_t *di)
 +{
-+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
++	uint32 status;
 +
-+	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
-+}
++	W_REG(&di->d64rxregs->control, 0);
++	SPINWAIT((status = (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED,
++		 10000);
 +
-+uint
-+osl_dma_map(osl_t *osh, void *va, uint size, int direction)
-+{
-+	int dir;
-+	
-+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
-+	return (pci_map_single(osh->pdev, va, size, dir));
++	if (status != D64_RS0_RS_DISABLED) {
++		DMA_ERROR(("%s: dma_rxreset: dma cannot be stopped\n", di->name));
++	}
 +}
 +
-+void
-+osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
++static bool
++dma64_txsuspendedidle(dma_info_t *di)
 +{
-+	int dir;
++
++	if (!(R_REG(&di->d64txregs->control) & D64_XC_SE))
++		return 0;
 +	
-+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
-+	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
++	if ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_IDLE)
++		return 1;
++
++	return 0;
 +}
 +
-+#if defined(BINOSL)
-+void
-+osl_assert(char *exp, char *file, int line)
++/*
++ * supports full 64 bit dma engine buffer addressing so
++ * dma buffers can cross 4 Kbyte page boundaries.
++ */
++static int
++dma64_txfast(dma_info_t *di, void *p0, uint32 coreflags)
 +{
-+	char tempbuf[255];
++	void *p, *next;
++	uchar *data;
++	uint len;
++	uint txout;
++	uint32 flags;
++	uint32 pa;	
 +
-+	sprintf(tempbuf, "assertion \"%s\" failed: file \"%s\", line %d\n", exp, file, line);
-+	panic(tempbuf);
-+}
-+#endif	/* BCMDBG || BINOSL */
++	DMA_TRACE(("%s: dma_txfast\n", di->name));
 +
-+void
-+osl_delay(uint usec)
-+{
-+	uint d;
++	txout = di->txout;
++	flags = 0;
 +
-+	while (usec > 0) {
-+		d = MIN(usec, 1000);
-+		udelay(d);
-+		usec -= d;
-+	}
-+}
++	/*
++	 * Walk the chain of packet buffers
++	 * allocating and initializing transmit descriptor entries.
++	 */
++	for (p = p0; p; p = next) {
++		data = PKTDATA(di->osh, p);
++		len = PKTLEN(di->osh, p);
++		next = PKTNEXT(di->osh, p);
 +
-+/*
-+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
-+ */
-+#ifdef BINOSL
++		/* return nonzero if out of tx descriptors */
++		if (NEXTTXD(txout) == di->txin)
++			goto outoftxd;
 +
-+int
-+osl_printf(const char *format, ...)
-+{
-+	va_list args;
-+	char buf[1024];
-+	int len;
++		if (len == 0)
++			continue;
 +
-+	/* sprintf into a local buffer because there *is* no "vprintk()".. */
-+	va_start(args, format);
-+	len = vsprintf(buf, format, args);
-+	va_end(args);
++		/* get physical address of buffer start */
++		pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);
 +
-+	if (len > sizeof (buf)) {
-+		printk("osl_printf: buffer overrun\n");
-+		return (0);
++		flags = coreflags;
++		
++		if (p == p0)
++			flags |= D64_CTRL1_SOF;
++		if (next == NULL)
++			flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
++		if (txout == (di->ntxd - 1))
++			flags |= D64_CTRL1_EOT;
++
++		dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
++
++		ASSERT(di->txp[txout] == NULL);
++
++		txout = NEXTTXD(txout);
 +	}
 +
-+	return (printk(buf));
-+}
++	/* if last txd eof not set, fix it */
++	if (!(flags & D64_CTRL1_EOF))
++		W_SM(&di->txd64[PREVTXD(txout)].ctrl1, BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
 +
-+int
-+osl_sprintf(char *buf, const char *format, ...)
-+{
-+	va_list args;
-+	int rc;
++	/* save the packet */
++	di->txp[PREVTXD(txout)] = p0;
 +
-+	va_start(args, format);
-+	rc = vsprintf(buf, format, args);
-+	va_end(args);
-+	return (rc);
-+}
++	/* bump the tx descriptor index */
++	di->txout = txout;
 +
-+int
-+osl_strcmp(const char *s1, const char *s2)
-+{
-+	return (strcmp(s1, s2));
-+}
++	/* kick the chip */
++	W_REG(&di->d64txregs->ptr, I2B(txout, dma64dd_t));
 +
-+int
-+osl_strncmp(const char *s1, const char *s2, uint n)
-+{
-+	return (strncmp(s1, s2, n));
-+}
++	/* tx flow control */
++	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
 +
-+int
-+osl_strlen(const char *s)
-+{
-+	return (strlen(s));
-+}
++	return (0);
 +
-+char*
-+osl_strcpy(char *d, const char *s)
-+{
-+	return (strcpy(d, s));
++outoftxd:
++	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
++	PKTFREE(di->osh, p0, TRUE);
++	di->txavail = 0;
++	di->hnddma.txnobuf++;
++	return (-1);
 +}
 +
-+char*
-+osl_strncpy(char *d, const char *s, uint n)
++static void*
++dma64_getnexttxp(dma_info_t *di, bool forceall)
 +{
-+	return (strncpy(d, s, n));
-+}
++	uint start, end, i;
++	void *txp;
 +
-+void
-+bcopy(const void *src, void *dst, int len)
-+{
-+	memcpy(dst, src, len);
-+}
++	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));
 +
-+int
-+bcmp(const void *b1, const void *b2, int len)
-+{
-+	return (memcmp(b1, b2, len));
-+}
++	txp = NULL;
 +
-+void
-+bzero(void *b, int len)
-+{
-+	memset(b, '\0', len);
-+}
++	start = di->txin;
++	if (forceall)
++		end = di->txout;
++	else
++		end = B2I(R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t);
 +
-+uint32
-+osl_readl(volatile uint32 *r)
-+{
-+	return (readl(r));
-+}
++	if ((start == 0) && (end > di->txout))
++		goto bogus;
 +
-+uint16
-+osl_readw(volatile uint16 *r)
-+{
-+	return (readw(r));
-+}
++	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
++		DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) - di->dataoffsetlow),
++			  (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) & D64_CTRL2_BC_MASK), DMA_TX, di->txp[i]);
 +
-+uint8
-+osl_readb(volatile uint8 *r)
-+{
-+	return (readb(r));
-+}
++		W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
++		W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
 +
-+void
-+osl_writel(uint32 v, volatile uint32 *r)
-+{
-+	writel(v, r);
-+}
++		txp = di->txp[i];
++		di->txp[i] = NULL;
++	}
 +
-+void
-+osl_writew(uint16 v, volatile uint16 *r)
-+{
-+	writew(v, r);
-+}
++	di->txin = i;
 +
-+void
-+osl_writeb(uint8 v, volatile uint8 *r)
-+{
-+	writeb(v, r);
++	/* tx flow control */
++	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
++
++	return (txp);
++
++bogus:
++/*
++	DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
++		start, end, di->txout, forceall));
++*/
++	return (NULL);
 +}
 +
-+void *
-+osl_uncached(void *va)
++static void *
++dma64_getnextrxp(dma_info_t *di, bool forceall)
 +{
-+#ifdef mips
-+	return ((void*)KSEG1ADDR(va));
-+#else
-+	return ((void*)va);
-+#endif
-+}
++	uint i;
++	void *rxp;
 +
-+uint
-+osl_getcycles(void)
-+{
-+	uint cycles;
++	/* if forcing, dma engine must be disabled */
++	ASSERT(!forceall || !dma_rxenabled(di));
 +
-+#if defined(mips)
-+	cycles = read_c0_count() * 2;
-+#elif defined(__i386__)
-+	rdtscl(cycles);
-+#else
-+	cycles = 0;
-+#endif
-+	return cycles;
-+}
++	i = di->rxin;
 +
-+void *
-+osl_reg_map(uint32 pa, uint size)
-+{
-+	return (ioremap_nocache((unsigned long)pa, (unsigned long)size));
-+}
++	/* return if no packets posted */
++	if (i == di->rxout)
++		return (NULL);
 +
-+void
-+osl_reg_unmap(void *va)
-+{
-+	iounmap(va);
-+}
++	/* ignore curr if forceall */
++	if (!forceall && (i == B2I(R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK, dma64dd_t)))
++		return (NULL);
 +
-+int
-+osl_busprobe(uint32 *val, uint32 addr)
-+{
-+#ifdef mips
-+	return get_dbe(*val, (uint32*)addr);
-+#else
-+	*val = readl(addr);
-+	return 0;
-+#endif
-+}
++	/* get the packet pointer that corresponds to the rx descriptor */
++	rxp = di->rxp[i];
++	ASSERT(rxp);
++	di->rxp[i] = NULL;
 +
-+uchar*
-+osl_pktdata(osl_t *osh, void *skb)
-+{
-+	return (((struct sk_buff*)skb)->data);
-+}
++	/* clear this packet from the descriptor ring */
++	DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) - di->dataoffsetlow),
++		  di->rxbufsize, DMA_RX, rxp);
 +
-+uint
-+osl_pktlen(osl_t *osh, void *skb)
-+{
-+	return (((struct sk_buff*)skb)->len);
-+}
++	W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
++	W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
 +
-+uint
-+osl_pktheadroom(osl_t *osh, void *skb)
-+{
-+	return (uint) skb_headroom((struct sk_buff *) skb);
-+}
++	di->rxin = NEXTRXD(i);
 +
-+uint
-+osl_pkttailroom(osl_t *osh, void *skb)
-+{
-+	return (uint) skb_tailroom((struct sk_buff *) skb);
++	return (rxp);
 +}
 +
-+void*
-+osl_pktnext(osl_t *osh, void *skb)
++static void
++dma64_txrotate(di_t *di)
 +{
-+	return (((struct sk_buff*)skb)->next);
-+}
++	uint ad;
++	uint nactive;
++	uint rot;
++	uint old, new;
++	uint32 w;
++	uint first, last;
 +
-+void
-+osl_pktsetnext(void *skb, void *x)
-+{
-+	((struct sk_buff*)skb)->next = (struct sk_buff*)x;
-+}
++	ASSERT(dma_txsuspendedidle(di));
 +
-+void
-+osl_pktsetlen(osl_t *osh, void *skb, uint len)
-+{
-+	__skb_trim((struct sk_buff*)skb, len);
-+}
++	nactive = dma_txactive(di);
++	ad = B2I((R_REG(&di->d64txregs->status1) & D64_XS1_AD_MASK), dma64dd_t);
++	rot = TXD(ad - di->txin);
 +
-+uchar*
-+osl_pktpush(osl_t *osh, void *skb, int bytes)
-+{
-+	return (skb_push((struct sk_buff*)skb, bytes));
-+}
++	ASSERT(rot < di->ntxd);
 +
-+uchar*
-+osl_pktpull(osl_t *osh, void *skb, int bytes)
-+{
-+	return (skb_pull((struct sk_buff*)skb, bytes));
-+}
++	/* full-ring case is a lot harder - don't worry about this */
++	if (rot >= (di->ntxd - nactive)) {
++		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
++		return;
++	}
 +
-+void*
-+osl_pktdup(osl_t *osh, void *skb)
-+{
-+	return (skb_clone((struct sk_buff*)skb, GFP_ATOMIC));
-+}
++	first = di->txin;
++	last = PREVTXD(di->txout);
 +
-+void*
-+osl_pktcookie(void *skb)
-+{
-+	return ((void*)((struct sk_buff*)skb)->csum);
-+}
++	/* move entries starting at last and moving backwards to first */
++	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
++		new = TXD(old + rot);
 +
-+void
-+osl_pktsetcookie(void *skb, void *x)
-+{
-+	((struct sk_buff*)skb)->csum = (uint)x;
-+}
++		/*
++		 * Move the tx dma descriptor.
++		 * EOT is set only in the last entry in the ring.
++		 */
++		w = R_SM(&di->txd64[old].ctrl1) & ~D64_CTRL1_EOT;
++		if (new == (di->ntxd - 1))
++			w |= D64_CTRL1_EOT;
++		W_SM(&di->txd64[new].ctrl1, w);
 +
-+void*
-+osl_pktlink(void *skb)
-+{
-+	return (((struct sk_buff*)skb)->prev);
-+}
++		w = R_SM(&di->txd64[old].ctrl2);
++		W_SM(&di->txd64[new].ctrl2, w);
 +
-+void
-+osl_pktsetlink(void *skb, void *x)
-+{
-+	((struct sk_buff*)skb)->prev = (struct sk_buff*)x;
-+}
++		W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
++		W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));
 +
-+uint
-+osl_pktprio(void *skb)
-+{
-+	return (((struct sk_buff*)skb)->priority);
-+}
++		/* zap the old tx dma descriptor address field */
++		W_SM(&di->txd64[old].addrlow, 0xdeadbeef);
++		W_SM(&di->txd64[old].addrhigh, 0xdeadbeef);
 +
-+void
-+osl_pktsetprio(void *skb, uint x)
-+{
-+	((struct sk_buff*)skb)->priority = x;
++		/* move the corresponding txp[] entry */
++		ASSERT(di->txp[new] == NULL);
++		di->txp[new] = di->txp[old];
++		di->txp[old] = NULL;
++	}
++
++	/* update txin and txout */
++	di->txin = ad;
++	di->txout = TXD(di->txout + rot);
++	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
++
++	/* kick the chip */
++	W_REG(&di->d64txregs->ptr, I2B(di->txout, dma64dd_t));
 +}
 +
++#endif
 +
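The dma64 variants above differ from the 32-bit path mainly in the descriptor layout: each dma64dd_t carries the bus address as an addrlow/addrhigh pair (bits 31:0 and 63:32). dma64_dd_upd() is not part of this hunk, so the split below only illustrates what that helper has to do with a wide address.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pa = 0x00000001f2345678ULL;    /* hypothetical bus address above 4 GB */
	uint32_t addrlow  = (uint32_t)(pa & 0xffffffffu);
	uint32_t addrhigh = (uint32_t)(pa >> 32);

	printf("addrlow=0x%08x addrhigh=0x%08x\n", (unsigned)addrlow, (unsigned)addrhigh);
	return 0;
}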
-+#endif	/* BINOSL */
-diff -Nur linux-2.4.32/drivers/net/hnd/Makefile linux-2.4.32-brcm/drivers/net/hnd/Makefile
---- linux-2.4.32/drivers/net/hnd/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/net/hnd/Makefile	2005-12-16 23:39:11.284858000 +0100
-@@ -0,0 +1,19 @@
-+#
-+# Makefile for the BCM47xx specific kernel interface routines
-+# under Linux.
-+#
+diff -Naur linux.old/drivers/net/wl/hnddma.h linux.dev/drivers/net/wl/hnddma.h
+--- linux.old/drivers/net/wl/hnddma.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/wl/hnddma.h	2006-04-06 16:57:44.000000000 +0200
+@@ -0,0 +1,69 @@
++/*
++ * Generic Broadcom Home Networking Division (HND) DMA engine SW interface
++ * This supports the following chips: BCM42xx, 44xx, 47xx .
++ *
++ * Copyright 2005, Broadcom Corporation      
++ * All Rights Reserved.      
++ *       
++ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY      
++ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM      
++ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS      
++ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
++ * $Id$
++ */
 +
-+EXTRA_CFLAGS	+= -I$(TOPDIR)/arch/mips/bcm947xx/include -DBCMDRIVER
++#ifndef	_hnddma_h_
++#define	_hnddma_h_
 +
-+O_TARGET	:= hnd.o
++/* export structure */
++typedef volatile struct {
++	/* rx error counters */
++	uint		rxgiants;	/* rx giant frames */
++	uint		rxnobuf;	/* rx out of dma descriptors */
++	/* tx error counters */
++	uint		txnobuf;	/* tx out of dma descriptors */
++} hnddma_t;
++
++#ifndef di_t
++#define	di_t	void
++#endif
++
++#ifndef osl_t 
++#define osl_t void
++#endif
 +
-+HND_OBJS	:= bcmutils.o hnddma.o linux_osl.o sbutils.o bcmsrom.o
++/* externs */
++extern void dma_detach(di_t *di);
++extern void dma_txreset(di_t *di);
++extern void dma_rxreset(di_t *di);
++extern void dma_txinit(di_t *di);
++extern bool dma_txenabled(di_t *di);
++extern void dma_rxinit(di_t *di);
++extern void dma_rxenable(di_t *di);
++extern bool dma_rxenabled(di_t *di);
++extern void dma_txsuspend(di_t *di);
++extern void dma_txresume(di_t *di);
++extern bool dma_txsuspended(di_t *di);
++extern bool dma_txsuspendedidle(di_t *di);
++extern bool dma_txstopped(di_t *di);
++extern bool dma_rxstopped(di_t *di);
++extern int dma_txfast(di_t *di, void *p, uint32 coreflags);
++extern void dma_fifoloopbackenable(di_t *di);
++extern void *dma_rx(di_t *di);
++extern void dma_rxfill(di_t *di);
++extern void dma_txreclaim(di_t *di, bool forceall);
++extern void dma_rxreclaim(di_t *di);
++extern uintptr dma_getvar(di_t *di, char *name);
++extern void *dma_getnexttxp(di_t *di, bool forceall);
++extern void *dma_peeknexttxp(di_t *di);
++extern void *dma_getnextrxp(di_t *di, bool forceall);
++extern void dma_txblock(di_t *di);
++extern void dma_txunblock(di_t *di);
++extern uint dma_txactive(di_t *di);
++extern void dma_txrotate(di_t *di);
 +
-+export-objs	:= shared_ksyms.o
-+obj-y		:= shared_ksyms.o $(HND_OBJS)
-+obj-m           := $(O_TARGET)
++extern void dma_rxpiomode(dma32regs_t *);
++extern void dma_txpioloopback(dma32regs_t *);
 +
-+include $(TOPDIR)/Rules.make
 +
-+shared_ksyms.c: shared_ksyms.sh $(HND_OBJS)
-+	sh -e $< $(HND_OBJS) > $@
-diff -Nur linux-2.4.32/drivers/net/hnd/sbutils.c linux-2.4.32-brcm/drivers/net/hnd/sbutils.c
---- linux-2.4.32/drivers/net/hnd/sbutils.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/net/hnd/sbutils.c	2005-12-16 23:39:11.316860000 +0100
-@@ -0,0 +1,2837 @@
++#endif	/* _hnddma_h_ */
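hnddma.h only exposes the opaque di_t handle plus the externs above. A hedged sketch of the transmit path a caller such as wl_apsta.o is expected to drive; the handle and packet come from dma_attach() and the OSL packet pool, neither of which appears in this hunk, so this is a compile-only sketch against restated prototypes rather than a drop-in example.

/* prototypes restated locally so the sketch stands alone */
typedef void di_t;
typedef int bool;
#define FALSE 0

extern void  dma_txinit(di_t *di);
extern int   dma_txfast(di_t *di, void *p, unsigned int coreflags);
extern void *dma_getnexttxp(di_t *di, bool forceall);

static void wl_tx_sketch(di_t *di, void *pkt)
{
	void *done;

	dma_txinit(di);                      /* bring up the tx engine                   */

	if (dma_txfast(di, pkt, 0) < 0)      /* 0: no core-specific descriptor flags     */
		return;                      /* ring full; dma_txfast() freed the packet */

	/* later, from the tx-completion interrupt path: */
	while ((done = dma_getnexttxp(di, FALSE)) != NULL)
		;                            /* hand each completed packet back to the OSL */
}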
+diff -Naur linux.old/drivers/net/wl/pktq.h linux.dev/drivers/net/wl/pktq.h
+--- linux.old/drivers/net/wl/pktq.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/wl/pktq.h	2006-04-06 17:32:52.000000000 +0200
+@@ -0,0 +1,48 @@
 +/*
-+ * Misc utility routines for accessing chip-specific features
-+ * of the SiliconBackplane-based Broadcom chips.
++ * Misc useful os-independent macros and functions.
 + *
 + * Copyright 2005, Broadcom Corporation
 + * All Rights Reserved.
@@ -14928,2933 +17486,2736 @@ diff -Nur linux-2.4.32/drivers/net/hnd/sbutils.c linux-2.4.32-brcm/drivers/net/h
 + * $Id$
 + */
 +
-+#include <typedefs.h>
-+#include <osl.h>
-+#include <sbutils.h>
-+#include <bcmutils.h>
-+#include <bcmdevs.h>
-+#include <sbconfig.h>
-+#include <sbchipc.h>
-+#include <sbpci.h>
-+#include <sbpcie.h>
-+#include <pcicfg.h>
-+#include <sbpcmcia.h>
-+#include <sbextif.h>
-+#include <bcmsrom.h>
-+
-+/* debug/trace */
-+#define	SB_ERROR(args)
-+
-+
-+typedef uint32 (*sb_intrsoff_t)(void *intr_arg);
-+typedef void (*sb_intrsrestore_t)(void *intr_arg, uint32 arg);
-+typedef bool (*sb_intrsenabled_t)(void *intr_arg);
-+
-+/* misc sb info needed by some of the routines */
-+typedef struct sb_info {
-+
-+	struct sb_pub  	sb;			/* back plane public state(must be first field of sb_info */
-+
-+	void	*osh;			/* osl os handle */
-+	void	*sdh;			/* bcmsdh handle */
-+
-+	void	*curmap;		/* current regs va */
-+	void	*regs[SB_MAXCORES];	/* other regs va */
-+
-+	uint	curidx;			/* current core index */
-+	uint	dev_coreid;		/* the core provides driver functions */
-+
-+	bool	memseg;			/* flag to toggle MEM_SEG register */
-+
-+	uint	gpioidx;		/* gpio control core index */
-+	uint	gpioid;			/* gpio control coretype */
-+
-+	uint	numcores;		/* # discovered cores */
-+	uint	coreid[SB_MAXCORES];	/* id of each core */
-+
-+	void	*intr_arg;		/* interrupt callback function arg */
-+	sb_intrsoff_t		intrsoff_fn;		/* function turns chip interrupts off */
-+	sb_intrsrestore_t	intrsrestore_fn;	/* function restore chip interrupts */
-+	sb_intrsenabled_t	intrsenabled_fn;	/* function to check if chip interrupts are enabled */
-+
-+} sb_info_t;
-+
-+/* local prototypes */
-+static sb_info_t * BCMINIT(sb_doattach)(sb_info_t *si, uint devid, osl_t *osh, void *regs,
-+	uint bustype, void *sdh, char **vars, int *varsz);
-+static void BCMINIT(sb_scan)(sb_info_t *si);
-+static uint sb_corereg(sb_info_t *si, uint coreidx, uint regoff, uint mask, uint val);
-+static uint _sb_coreidx(sb_info_t *si);
-+static uint sb_findcoreidx(sb_info_t *si, uint coreid, uint coreunit);
-+static uint BCMINIT(sb_pcidev2chip)(uint pcidev);
-+static uint BCMINIT(sb_chip2numcores)(uint chip);
-+static bool sb_ispcie(sb_info_t *si);
-+static bool sb_find_pci_capability(sb_info_t *si, uint8 req_cap_id, uchar *buf, uint32 *buflen);
-+static int sb_pci_fixcfg(sb_info_t *si);
-+
-+/* routines to access mdio slave device registers */
-+static int sb_pcie_mdiowrite(sb_info_t *si,  uint physmedia, uint readdr, uint val);
-+static void BCMINIT(sb_war30841)(sb_info_t *si);
-+
-+/* delay needed between the mdio control/ mdiodata register data access */
-+#define PR28829_DELAY() OSL_DELAY(10)
-+
-+
-+/* global variable to indicate reservation/release of gpio's*/
-+static uint32 sb_gpioreservation = 0;
++#ifndef	_pktq_h_
++#define	_pktq_h_
 +
-+#define	SB_INFO(sbh)	(sb_info_t*)sbh
-+#define	SET_SBREG(sbh, r, mask, val)	W_SBREG((sbh), (r), ((R_SBREG((sbh), (r)) & ~(mask)) | (val)))
-+#define	GOODCOREADDR(x)	(((x) >= SB_ENUM_BASE) && ((x) <= SB_ENUM_LIM) && ISALIGNED((x), SB_CORE_SIZE))
-+#define	GOODREGS(regs)	((regs) && ISALIGNED((uintptr)(regs), SB_CORE_SIZE))
-+#define	REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
-+#define	GOODIDX(idx)	(((uint)idx) < SB_MAXCORES)
-+#define	BADIDX		(SB_MAXCORES+1)
-+#define	NOREV		-1
++/*** driver-only section ***/
++#ifdef BCMDRIVER
 +
-+#define PCI(si)		((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCI)) 
-+#define PCIE(si)	((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCIE)) 
++/* generic osl packet queue */
++struct pktq {
++	void *head;	/* first packet to dequeue */
++	void *tail;	/* last packet to dequeue */
++	uint len;	/* number of queued packets */
++	uint maxlen;	/* maximum number of queued packets */
++	bool priority;	/* enqueue by packet priority */
++	uint8 prio_map[MAXPRIO+1]; /* user priority to packet enqueue policy map */
++};
++#define DEFAULT_QLEN	128
 +
-+/* sonicsrev */
-+#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
-+#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
++#define	pktq_len(q)	((q)->len)
++#define	pktq_avail(q)	((q)->maxlen - (q)->len)
++#define	pktq_head(q)	((q)->head)
++#define	pktq_full(q)	((q)->len >= (q)->maxlen)
++#define	_pktq_pri(q, pri)	((q)->prio_map[pri])
++#define	pktq_tailpri(q)	((q)->tail ? _pktq_pri(q, PKTPRIO((q)->tail)) : _pktq_pri(q, 0))
 +
-+#define	R_SBREG(sbh, sbr)	sb_read_sbreg((sbh), (sbr))
-+#define	W_SBREG(sbh, sbr, v)	sb_write_sbreg((sbh), (sbr), (v))
-+#define	AND_SBREG(sbh, sbr, v)	W_SBREG((sbh), (sbr), (R_SBREG((sbh), (sbr)) & (v)))
-+#define	OR_SBREG(sbh, sbr, v)	W_SBREG((sbh), (sbr), (R_SBREG((sbh), (sbr)) | (v)))
++/* externs */
++/* packet */
++extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
++extern uint pkttotlen(osl_t *osh, void *);
++extern void pktq_init(struct pktq *q, uint maxlen, const uint8 prio_map[]);
++extern void pktenq(struct pktq *q, void *p, bool lifo);
++extern void *pktdeq(struct pktq *q);
++extern void *pktdeqtail(struct pktq *q);
 +
++#endif
++#endif	/* _pktq_h_ */
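struct pktq above is a bounded queue over the OSL packet type, with pktq_len()/pktq_avail()/pktq_full() reduced to simple arithmetic on len and maxlen. The self-contained sketch below mirrors the head/tail/len bookkeeping that pktenq()/pktdeq() imply for the plain FIFO case; the priority and prio_map handling of the real queue is not modelled here.

#include <stdio.h>
#include <stddef.h>

struct node { struct node *next; int id; };
struct q { struct node *head, *tail; unsigned len, maxlen; };

static int enq(struct q *q, struct node *n)          /* tail insert, FIFO order */
{
	if (q->len >= q->maxlen)                     /* the pktq_full() case    */
		return -1;
	n->next = NULL;
	if (q->tail)
		q->tail->next = n;
	else
		q->head = n;
	q->tail = n;
	q->len++;
	return 0;
}

static struct node *deq(struct q *q)                 /* head removal            */
{
	struct node *n = q->head;
	if (!n)
		return NULL;
	q->head = n->next;
	if (!q->head)
		q->tail = NULL;
	q->len--;
	return n;
}

int main(void)
{
	struct q q = { NULL, NULL, 0, 128 };         /* DEFAULT_QLEN-sized queue */
	struct node a = { NULL, 1 }, b = { NULL, 2 };

	enq(&q, &a);
	enq(&q, &b);
	printf("len=%u avail=%u first=%d\n", q.len, q.maxlen - q.len, deq(&q)->id);
	return 0;
}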
+diff -Naur linux.old/drivers/net/wl/sbhnddma.h linux.dev/drivers/net/wl/sbhnddma.h
+--- linux.old/drivers/net/wl/sbhnddma.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/wl/sbhnddma.h	2006-04-06 15:34:14.000000000 +0200
+@@ -0,0 +1,312 @@
 +/*
-+ * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts before/
-+ * after core switching to avoid invalid register accesss inside ISR.
++ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
++ * This supports the following chips: BCM42xx, 44xx, 47xx .
++ *
++ * Copyright 2005, Broadcom Corporation      
++ * All Rights Reserved.      
++ *       
++ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY      
++ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM      
++ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS      
++ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
++ * $Id$
 + */
-+#define INTR_OFF(si, intr_val) \
-+	if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
-+		intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
-+#define INTR_RESTORE(si, intr_val) \
-+	if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
-+		(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
-+
-+/* dynamic clock control defines */
-+#define	LPOMINFREQ	25000			/* low power oscillator min */
-+#define	LPOMAXFREQ	43000			/* low power oscillator max */
-+#define	XTALMINFREQ	19800000		/* 20 MHz - 1% */
-+#define	XTALMAXFREQ	20200000		/* 20 MHz + 1% */
-+#define	PCIMINFREQ	25000000		/* 25 MHz */
-+#define	PCIMAXFREQ	34000000		/* 33 MHz + fudge */
-+
-+#define	ILP_DIV_5MHZ	0			/* ILP = 5 MHz */
-+#define	ILP_DIV_1MHZ	4			/* ILP = 1 MHz */
-+
-+#define MIN_DUMPBUFLEN  32	/* debug */
-+
-+/* different register spaces to access thr'u pcie indirect access*/
-+#define PCIE_CONFIGREGS 	1
-+#define PCIE_PCIEREGS 		2
 +
-+/* GPIO Based LED powersave defines */
-+#define DEFAULT_GPIO_ONTIME	10
-+#define DEFAULT_GPIO_OFFTIME	90
++#ifndef	_sbhnddma_h_
++#define	_sbhnddma_h_
 +
-+#define DEFAULT_GPIOTIMERVAL  ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
++ 
++/* 2byte-wide pio register set per channel(xmt or rcv) */
++typedef volatile struct {
++	uint16	fifocontrol;
++	uint16	fifodata;
++	uint16	fifofree;	/* only valid in xmt channel, not in rcv channel */
++	uint16	PAD;
++} pio2regs_t;
 +
-+static uint32
-+sb_read_sbreg(sb_info_t *si, volatile uint32 *sbr)
-+{
-+	uint8 tmp;
-+	uint32 val, intr_val = 0;
++/* a pair of pio channels(tx and rx) */
++typedef volatile struct {
++	pio2regs_t	tx;
++	pio2regs_t	rx;
++} pio2regp_t;
 +
++/* 4byte-wide pio register set per channel(xmt or rcv) */
++typedef volatile struct {
++	uint32	fifocontrol;
++	uint32	fifodata;
++} pio4regs_t;
 +
-+	/*
-+	 * compact flash only has 11 bits address, while we needs 12 bits address.
-+	 * MEM_SEG will be OR'd with other 11 bits address in hardware,
-+	 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
-+	 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
-+	 */
-+	if(si->memseg) {
-+		INTR_OFF(si, intr_val);
-+		tmp = 1;
-+		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
-+		sbr = (uint32) ((uintptr) sbr & ~(1 << 11));	/* mask out bit 11*/
-+	}
++/* a pair of pio channels(tx and rx) */
++typedef volatile struct {
++	pio4regs_t	tx;
++	pio4regs_t	rx;
++} pio4regp_t;
 +
-+	val = R_REG(sbr);
 +
-+	if(si->memseg) {
-+		tmp = 0;
-+		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
-+		INTR_RESTORE(si, intr_val);
-+	}
 +
-+	return (val);
-+}
++/* DMA structure:
++ *  support two DMA engines: 32 bits address or 64 bit addressing
++ *  basic DMA register set is per channel(transmit or receive)
++ *  a pair of channels is defined for convenience
++ */
 +
-+static void
-+sb_write_sbreg(sb_info_t *si, volatile uint32 *sbr, uint32 v)
-+{
-+	uint8 tmp;
-+	volatile uint32 dummy;
-+	uint32 intr_val = 0;
 +
++/*** 32 bits addressing ***/ 
 +
-+	/*
-+	 * compact flash only has 11 bits address, while we needs 12 bits address.
-+	 * MEM_SEG will be OR'd with other 11 bits address in hardware,
-+	 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
-+	 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
-+	 */
-+	if(si->memseg) {
-+		INTR_OFF(si, intr_val);
-+		tmp = 1;
-+		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
-+		sbr = (uint32) ((uintptr) sbr & ~(1 << 11));	/* mask out bit 11*/
-+	}
++/* dma registers per channel(xmt or rcv) */
++typedef volatile struct {
++	uint32	control;		/* enable, et al */
++	uint32	addr;			/* descriptor ring base address (4K aligned) */
++	uint32	ptr;			/* last descriptor posted to chip */
++	uint32	status;			/* current active descriptor, et al */
++} dma32regs_t;
 +
-+	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
-+#ifdef IL_BIGENDIAN
-+		dummy = R_REG(sbr);
-+		W_REG(((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
-+		dummy = R_REG(sbr);
-+		W_REG((volatile uint16 *)sbr, (uint16)(v & 0xffff));
-+#else
-+		dummy = R_REG(sbr);
-+		W_REG((volatile uint16 *)sbr, (uint16)(v & 0xffff));
-+		dummy = R_REG(sbr);
-+		W_REG(((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
-+#endif
-+	} else
-+		W_REG(sbr, v);
++typedef volatile struct {
++	dma32regs_t	xmt;		/* dma tx channel */
++	dma32regs_t	rcv;		/* dma rx channel */
++} dma32regp_t;
 +
-+	if(si->memseg) {
-+		tmp = 0;
-+		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
-+		INTR_RESTORE(si, intr_val);
-+	}
-+}
++typedef volatile struct {	/* diag access */
++	uint32	fifoaddr;		/* diag address */
++	uint32	fifodatalow;		/* low 32bits of data */
++	uint32	fifodatahigh;		/* high 32bits of data */
++	uint32	pad;			/* reserved */
++} dma32diag_t;
 +
 +/*
-+ * Allocate a sb handle.
-+ * devid - pci device id (used to determine chip#)
-+ * osh - opaque OS handle
-+ * regs - virtual address of initial core registers
-+ * bustype - pci/pcmcia/sb/sdio/etc
-+ * vars - pointer to a pointer area for "environment" variables
-+ * varsz - pointer to int to return the size of the vars
++ * DMA Descriptor
++ * Descriptors are only read by the hardware, never written back.
 + */
-+sb_t * 
-+BCMINITFN(sb_attach)(uint devid, osl_t *osh, void *regs,
-+	uint bustype, void *sdh, char **vars, int *varsz)
-+{
-+	sb_info_t *si;
++typedef volatile struct {
++	uint32	ctrl;		/* misc control bits & bufcount */
++	uint32	addr;		/* data buffer address */
++} dma32dd_t;
 +
-+	/* alloc sb_info_t */
-+	if ((si = MALLOC(osh, sizeof (sb_info_t))) == NULL) {
-+		SB_ERROR(("sb_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
-+		return (NULL);
-+	}
++/*
++ * Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page.
++ */
++#define	D32MAXRINGSZ	4096
++#define	D32RINGALIGN	4096
++#define	D32MAXDD	(D32MAXRINGSZ / sizeof (dma32dd_t))
 +
-+	if (BCMINIT(sb_doattach)(si, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
-+		MFREE(osh, si, sizeof (sb_info_t));
-+		return (NULL);
-+	}
-+	return (sb_t *)si;
-+}
++/* transmit channel control */
++#define	XC_XE		((uint32)1 << 0)	/* transmit enable */
++#define	XC_SE		((uint32)1 << 1)	/* transmit suspend request */
++#define	XC_LE		((uint32)1 << 2)	/* loopback enable */
++#define	XC_FL		((uint32)1 << 4)	/* flush request */
++#define	XC_AE		((uint32)3 << 16)	/* address extension bits */
++#define	XC_AE_SHIFT	16
 +
-+/* Using sb_kattach depends on SB_BUS support, either implicit  */
-+/* no limiting BCMBUSTYPE value) or explicit (value is SB_BUS). */
-+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)
++/* transmit descriptor table pointer */
++#define	XP_LD_MASK	0xfff			/* last valid descriptor */
 +
-+/* global kernel resource */
-+static sb_info_t ksi;
++/* transmit channel status */
++#define	XS_CD_MASK	0x0fff			/* current descriptor pointer */
++#define	XS_XS_MASK	0xf000			/* transmit state */
++#define	XS_XS_SHIFT	12
++#define	XS_XS_DISABLED	0x0000			/* disabled */
++#define	XS_XS_ACTIVE	0x1000			/* active */
++#define	XS_XS_IDLE	0x2000			/* idle wait */
++#define	XS_XS_STOPPED	0x3000			/* stopped */
++#define	XS_XS_SUSP	0x4000			/* suspend pending */
++#define	XS_XE_MASK	0xf0000			/* transmit errors */
++#define	XS_XE_SHIFT	16
++#define	XS_XE_NOERR	0x00000			/* no error */
++#define	XS_XE_DPE	0x10000			/* descriptor protocol error */
++#define	XS_XE_DFU	0x20000			/* data fifo underrun */
++#define	XS_XE_BEBR	0x30000			/* bus error on buffer read */
++#define	XS_XE_BEDA	0x40000			/* bus error on descriptor access */
++#define	XS_AD_MASK	0xfff00000		/* active descriptor */
++#define	XS_AD_SHIFT	20
 +
-+/* generic kernel variant of sb_attach() */
-+sb_t * 
-+BCMINITFN(sb_kattach)()
-+{
-+	uint32 *regs;
++/* receive channel control */
++#define	RC_RE		((uint32)1 << 0)	/* receive enable */
++#define	RC_RO_MASK	0xfe			/* receive frame offset */
++#define	RC_RO_SHIFT	1
++#define	RC_FM		((uint32)1 << 8)	/* direct fifo receive (pio) mode */
++#define	RC_AE		((uint32)3 << 16)	/* address extension bits */
++#define	RC_AE_SHIFT	16
 +
-+	if (ksi.curmap == NULL) {
-+		uint32 cid;
++/* receive descriptor table pointer */
++#define	RP_LD_MASK	0xfff			/* last valid descriptor */
 +
-+		regs = (uint32 *)REG_MAP(SB_ENUM_BASE, SB_CORE_SIZE);
-+		cid = R_REG((uint32 *)regs);
-+		if (((cid & CID_ID_MASK) == BCM4712_DEVICE_ID) &&
-+		    ((cid & CID_PKG_MASK) != BCM4712LARGE_PKG_ID) &&
-+		    ((cid & CID_REV_MASK) <= (3 << CID_REV_SHIFT))) {
-+			uint32 *scc, val;
++/* receive channel status */
++#define	RS_CD_MASK	0x0fff			/* current descriptor pointer */
++#define	RS_RS_MASK	0xf000			/* receive state */
++#define	RS_RS_SHIFT	12
++#define	RS_RS_DISABLED	0x0000			/* disabled */
++#define	RS_RS_ACTIVE	0x1000			/* active */
++#define	RS_RS_IDLE	0x2000			/* idle wait */
++#define	RS_RS_STOPPED	0x3000			/* reserved */
++#define	RS_RE_MASK	0xf0000			/* receive errors */
++#define	RS_RE_SHIFT	16
++#define	RS_RE_NOERR	0x00000			/* no error */
++#define	RS_RE_DPE	0x10000			/* descriptor protocol error */
++#define	RS_RE_DFO	0x20000			/* data fifo overflow */
++#define	RS_RE_BEBW	0x30000			/* bus error on buffer write */
++#define	RS_RE_BEDA	0x40000			/* bus error on descriptor access */
++#define	RS_AD_MASK	0xfff00000		/* active descriptor */
++#define	RS_AD_SHIFT	20
 +
-+			scc = (uint32 *)((uchar*)regs + OFFSETOF(chipcregs_t, slow_clk_ctl));
-+			val = R_REG(scc);
-+			SB_ERROR(("    initial scc = 0x%x\n", val));
-+			val |= SCC_SS_XTAL;
-+			W_REG(scc, val);
-+		}
++/* fifoaddr */
++#define	FA_OFF_MASK	0xffff			/* offset */
++#define	FA_SEL_MASK	0xf0000			/* select */
++#define	FA_SEL_SHIFT	16
++#define	FA_SEL_XDD	0x00000			/* transmit dma data */
++#define	FA_SEL_XDP	0x10000			/* transmit dma pointers */
++#define	FA_SEL_RDD	0x40000			/* receive dma data */
++#define	FA_SEL_RDP	0x50000			/* receive dma pointers */
++#define	FA_SEL_XFD	0x80000			/* transmit fifo data */
++#define	FA_SEL_XFP	0x90000			/* transmit fifo pointers */
++#define	FA_SEL_RFD	0xc0000			/* receive fifo data */
++#define	FA_SEL_RFP	0xd0000			/* receive fifo pointers */
++#define	FA_SEL_RSD	0xe0000			/* receive frame status data */
++#define	FA_SEL_RSP	0xf0000			/* receive frame status pointers */
 +
-+		if (BCMINIT(sb_doattach)(&ksi, BCM4710_DEVICE_ID, NULL, (void*)regs,
-+			SB_BUS, NULL, NULL, NULL) == NULL) {
-+			return NULL;
-+		}
-+	}
++/* descriptor control flags */
++#define	CTRL_BC_MASK	0x1fff			/* buffer byte count */
++#define	CTRL_AE		((uint32)3 << 16)	/* address extension bits */
++#define	CTRL_AE_SHIFT	16
++#define	CTRL_EOT	((uint32)1 << 28)	/* end of descriptor table */
++#define	CTRL_IOC	((uint32)1 << 29)	/* interrupt on completion */
++#define	CTRL_EOF	((uint32)1 << 30)	/* end of frame */
++#define	CTRL_SOF	((uint32)1 << 31)	/* start of frame */
 +
-+	return (sb_t *)&ksi;
-+}
-+#endif
++/* control flags in the range [27:20] are core-specific and not defined here */
++#define	CTRL_CORE_MASK	0x0ff00000
 +
-+static sb_info_t  * 
-+BCMINITFN(sb_doattach)(sb_info_t *si, uint devid, osl_t *osh, void *regs,
-+	uint bustype, void *sdh, char **vars, int *varsz)
-+{
-+	uint origidx;
-+	chipcregs_t *cc;
-+	sbconfig_t *sb;
-+	uint32 w;
++/*** 64 bits addressing ***/
 +
-+	ASSERT(GOODREGS(regs));
++/* dma registers per channel(xmt or rcv) */
++typedef volatile struct {
++	uint32	control;		/* enable, et al */
++	uint32	ptr;			/* last descriptor posted to chip */
++	uint32	addrlow;		/* descriptor ring base address low 32-bits (8K aligned) */
++	uint32	addrhigh;		/* descriptor ring base address bits 63:32 (8K aligned) */
++	uint32	status0;		/* current descriptor, xmt state */
++	uint32	status1;		/* active descriptor, xmt error */
++} dma64regs_t;
 +
-+	bzero((uchar*)si, sizeof (sb_info_t));
++typedef volatile struct {
++	dma64regs_t	tx;		/* dma64 tx channel */
++	dma64regs_t	rx;		/* dma64 rx channel */
++} dma64regp_t;
 +
-+	si->sb.buscoreidx = si->gpioidx = BADIDX;
++typedef volatile struct {		/* diag access */
++	uint32	fifoaddr;		/* diag address */
++	uint32	fifodatalow;		/* low 32bits of data */
++	uint32	fifodatahigh;		/* high 32bits of data */
++	uint32	pad;			/* reserved */
++} dma64diag_t;
 +
-+	si->osh = osh;
-+	si->curmap = regs;
-+	si->sdh = sdh;
++/*
++ * DMA Descriptor
++ * Descriptors are only read by the hardware, never written back.
++ */
++typedef volatile struct {
++	uint32	ctrl1;		/* misc control bits & bufcount */
++	uint32	ctrl2;		/* buffer count and address extension */
++	uint32	addrlow;	/* memory address of the first byte of the data buffer, bits 31:0 */
++	uint32	addrhigh;	/* memory address of the first byte of the data buffer, bits 63:32 */
++} dma64dd_t;
 +
-+	/* check to see if we are a sb core mimic'ing a pci core */
-+	if (bustype == PCI_BUS) {
-+		if (OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof (uint32)) == 0xffffffff)
-+			bustype = SB_BUS;
-+		else
-+			bustype = PCI_BUS;
-+	}
++/*
++ * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical address.
++ */
++#define	D64MAXRINGSZ	8192
++#define	D64RINGALIGN	8192
++#define	D64MAXDD	(D64MAXRINGSZ / sizeof (dma64dd_t))
 +
-+	si->sb.bustype = bustype;
-+	if (si->sb.bustype != BUSTYPE(si->sb.bustype)) {
-+		SB_ERROR(("sb_doattach: bus type %d does not match configured bus type %d\n",
-+			  si->sb.bustype, BUSTYPE(si->sb.bustype)));
-+		return NULL;
-+	}
++/* transmit channel control */
++#define	D64_XC_XE		0x00000001	/* transmit enable */
++#define	D64_XC_SE		0x00000002	/* transmit suspend request */
++#define	D64_XC_LE		0x00000004	/* loopback enable */
++#define	D64_XC_FL		0x00000010	/* flush request */
++#define	D64_XC_AE		0x00110000	/* address extension bits */
++#define	D64_XC_AE_SHIFT		16
 +
-+	/* need to set memseg flag for CF card first before any sb registers access */
-+	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS)
-+		si->memseg = TRUE;
++/* transmit descriptor table pointer */
++#define	D64_XP_LD_MASK		0x00000fff	/* last valid descriptor */
++
++/* transmit channel status */
++#define	D64_XS0_CD_MASK		0x00001fff	/* current descriptor pointer */
++#define	D64_XS0_XS_MASK		0xf0000000     	/* transmit state */
++#define	D64_XS0_XS_SHIFT		28
++#define	D64_XS0_XS_DISABLED	0x00000000	/* disabled */
++#define	D64_XS0_XS_ACTIVE	0x10000000	/* active */
++#define	D64_XS0_XS_IDLE		0x20000000	/* idle wait */
++#define	D64_XS0_XS_STOPPED	0x30000000	/* stopped */
++#define	D64_XS0_XS_SUSP		0x40000000	/* suspend pending */
 +
-+	/* kludge to enable the clock on the 4306 which lacks a slowclock */
-+	if (BUSTYPE(si->sb.bustype) == PCI_BUS)
-+		sb_clkctl_xtal(&si->sb, XTAL|PLL, ON);
++#define	D64_XS1_AD_MASK		0x0001ffff	/* active descriptor */
++#define	D64_XS1_XE_MASK		0xf0000000     	/* transmit errors */
++#define	D64_XS1_XE_SHIFT		28
++#define	D64_XS1_XE_NOERR	0x00000000	/* no error */
++#define	D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
++#define	D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
++#define	D64_XS1_XE_DTE		0x30000000	/* data transfer error */
++#define	D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
++#define	D64_XS1_XE_COREE	0x50000000	/* core error */
 +
-+	if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
-+		w = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN, sizeof (uint32));
-+		if (!GOODCOREADDR(w))
-+			OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, sizeof (uint32), SB_ENUM_BASE);
-+	}
++/* receive channel control */
++#define	D64_RC_RE		0x00000001	/* receive enable */
++#define	D64_RC_RO_MASK		0x000000fe	/* receive frame offset */
++#define	D64_RC_RO_SHIFT		1
++#define	D64_RC_FM		0x00000100	/* direct fifo receive (pio) mode */
++#define	D64_RC_AE		0x00110000	/* address extension bits */
++#define	D64_RC_AE_SHIFT		16
 +
-+	/* initialize current core index value */
-+	si->curidx = _sb_coreidx(si);
++/* receive descriptor table pointer */
++#define	D64_RP_LD_MASK		0x00000fff	/* last valid descriptor */
 +
-+	if (si->curidx == BADIDX) {
-+		SB_ERROR(("sb_doattach: bad core index\n"));
-+		return NULL;
-+	}
++/* receive channel status */
++#define	D64_RS0_CD_MASK		0x00001fff	/* current descriptor pointer */
++#define	D64_RS0_RS_MASK		0xf0000000     	/* receive state */
++#define	D64_RS0_RS_SHIFT		28
++#define	D64_RS0_RS_DISABLED	0x00000000	/* disabled */
++#define	D64_RS0_RS_ACTIVE	0x10000000	/* active */
++#define	D64_RS0_RS_IDLE		0x20000000	/* idle wait */
++#define	D64_RS0_RS_STOPPED	0x30000000	/* stopped */
++#define	D64_RS0_RS_SUSP		0x40000000	/* suspend pending */
 +
-+	/* get sonics backplane revision */
-+	sb = REGS2SB(si->curmap);
-+	si->sb.sonicsrev = (R_SBREG(si, &(sb)->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
++#define	D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
++#define	D64_RS1_RE_MASK		0xf0000000     	/* receive errors */
++#define	D64_RS1_RE_SHIFT		28
++#define	D64_RS1_RE_NOERR	0x00000000	/* no error */
++#define	D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
++#define	D64_RS1_RE_DFU		0x20000000	/* data fifo overflow */
++#define	D64_RS1_RE_DTE		0x30000000	/* data transfer error */
++#define	D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
++#define	D64_RS1_RE_COREE	0x50000000	/* core error */
 +
-+	/* keep and reuse the initial register mapping */
-+	origidx = si->curidx;
-+	if (BUSTYPE(si->sb.bustype) == SB_BUS)
-+		si->regs[origidx] = regs;
++/* fifoaddr */
++#define	D64_FA_OFF_MASK		0xffff		/* offset */
++#define	D64_FA_SEL_MASK		0xf0000		/* select */
++#define	D64_FA_SEL_SHIFT	16
++#define	D64_FA_SEL_XDD		0x00000		/* transmit dma data */
++#define	D64_FA_SEL_XDP		0x10000		/* transmit dma pointers */
++#define	D64_FA_SEL_RDD		0x40000		/* receive dma data */
++#define	D64_FA_SEL_RDP		0x50000		/* receive dma pointers */
++#define	D64_FA_SEL_XFD		0x80000		/* transmit fifo data */
++#define	D64_FA_SEL_XFP		0x90000		/* transmit fifo pointers */
++#define	D64_FA_SEL_RFD		0xc0000		/* receive fifo data */
++#define	D64_FA_SEL_RFP		0xd0000		/* receive fifo pointers */
++#define	D64_FA_SEL_RSD		0xe0000		/* receive frame status data */
++#define	D64_FA_SEL_RSP		0xf0000		/* receive frame status pointers */
 +
-+	/* is core-0 a chipcommon core? */
-+	si->numcores = 1;
-+	cc = (chipcregs_t*) sb_setcoreidx(&si->sb, 0);
-+	if (sb_coreid(&si->sb) != SB_CC)
-+		cc = NULL;
++/* descriptor control flags 1 */
++#define	D64_CTRL1_EOT		((uint32)1 << 28)	/* end of descriptor table */
++#define	D64_CTRL1_IOC		((uint32)1 << 29)	/* interrupt on completion */
++#define	D64_CTRL1_EOF		((uint32)1 << 30)	/* end of frame */
++#define	D64_CTRL1_SOF		((uint32)1 << 31)	/* start of frame */
 +
-+	/* determine chip id and rev */
-+	if (cc) {
-+		/* chip common core found! */
-+		si->sb.chip = R_REG(&cc->chipid) & CID_ID_MASK;
-+		si->sb.chiprev = (R_REG(&cc->chipid) & CID_REV_MASK) >> CID_REV_SHIFT;
-+		si->sb.chippkg = (R_REG(&cc->chipid) & CID_PKG_MASK) >> CID_PKG_SHIFT;
-+	} else {
-+		/* The only pcmcia chip without a chipcommon core is a 4301 */
-+		if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS)
-+			devid = BCM4301_DEVICE_ID;
++/* descriptor control flags 2 */
++#define	D64_CTRL2_BC_MASK	0x00007fff	/* buffer byte count mask */
++#define	D64_CTRL2_AE		0x00110000	/* address extension bits */
++#define	D64_CTRL2_AE_SHIFT	16
 +
-+		/* no chip common core -- must convert device id to chip id */
-+		if ((si->sb.chip = BCMINIT(sb_pcidev2chip)(devid)) == 0) {
-+			SB_ERROR(("sb_doattach: unrecognized device id 0x%04x\n", devid));
-+			sb_setcoreidx(&si->sb, origidx);
-+			return NULL;
-+		}
-+	}
++/* control flags in the range [27:20] are core-specific and not defined here */
++#define	D64_CTRL_CORE_MASK	0x0ff00000
 +
-+	/* get chipcommon rev */
-+	si->sb.ccrev = cc ? (int)sb_corerev(&si->sb) : NOREV;
 +
-+	/* determine numcores */
-+	if (cc && ((si->sb.ccrev == 4) || (si->sb.ccrev >= 6)))
-+		si->numcores = (R_REG(&cc->chipid) & CID_CC_MASK) >> CID_CC_SHIFT;
-+	else
-+		si->numcores = BCMINIT(sb_chip2numcores)(si->sb.chip);
++#endif	/* _sbhnddma_h_ */
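The CTRL_* flag definitions above are what dma32_txfast() ORs together when it fills a descriptor. A standalone example composing the control word for a single-buffer frame that happens to occupy the last slot of the ring; the frame length and ring position are made up:

#include <stdio.h>
#include <stdint.h>

#define CTRL_BC_MASK 0x1fff
#define CTRL_EOT ((uint32_t)1 << 28)    /* end of descriptor table   */
#define CTRL_IOC ((uint32_t)1 << 29)    /* interrupt on completion   */
#define CTRL_EOF ((uint32_t)1 << 30)    /* end of frame              */
#define CTRL_SOF ((uint32_t)1 << 31)    /* start of frame            */

int main(void)
{
	unsigned len = 1514;                         /* hypothetical frame length          */
	uint32_t ctrl = len & CTRL_BC_MASK;

	ctrl |= CTRL_SOF | CTRL_EOF | CTRL_IOC;      /* first and last buffer of the frame */
	ctrl |= CTRL_EOT;                            /* descriptor sits in the last slot   */

	printf("ctrl = 0x%08x\n", (unsigned)ctrl);
	return 0;
}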
+diff -Naur linux.old/drivers/net/wl2/Makefile linux.dev/drivers/net/wl2/Makefile
+--- linux.old/drivers/net/wl2/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/wl2/Makefile	2006-04-06 16:56:27.000000000 +0200
+@@ -0,0 +1,23 @@
++#
++# Makefile for the Broadcom wl driver
++#
++# Copyright 2004, Broadcom Corporation
++# All Rights Reserved.
++# 
++# THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
++# KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
++# SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
++# FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
++#
++# $Id: Makefile,v 1.2 2005/03/29 03:32:18 mbm Exp $
 +
-+	/* return to original core */
-+	sb_setcoreidx(&si->sb, origidx);
++EXTRA_CFLAGS += -I$(TOPDIR)/arch/mips/bcm947xx/include -DBCMDRIVER
 +
-+	/* sanity checks */
-+	ASSERT(si->sb.chip);
++O_TARGET	:= wl.o
 +
-+	/* scan for cores */
-+	BCMINIT(sb_scan)(si);
++obj-y		:= wl_apsta.o
++obj-y		+= compat.o hnddma.o
 +
-+	/* fixup necessary chip/core configurations */
-+	if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
-+		if (sb_pci_fixcfg(si)) {
-+			SB_ERROR(("sb_doattach: sb_pci_fixcfg failed\n"));
-+			return NULL;
-+		}
-+	}
-+	
-+	/* srom_var_init() depends on sb_scan() info */
-+	if (srom_var_init(si, si->sb.bustype, si->curmap, osh, vars, varsz)) {
-+		SB_ERROR(("sb_doattach: srom_var_init failed: bad srom\n"));
-+		return (NULL);
-+	}
-+	
-+	if (cc == NULL) {
-+		/*
-+		 * The chip revision number is hardwired into all
-+		 * of the pci function config rev fields and is
-+		 * independent from the individual core revision numbers.
-+		 * For example, the "A0" silicon of each chip is chip rev 0.
-+		 * For PCMCIA we get it from the CIS instead.
-+		 */
-+		if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
-+			ASSERT(vars);
-+			si->sb.chiprev = getintvar(*vars, "chiprev");
-+		} else if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
-+			w = OSL_PCI_READ_CONFIG(osh, PCI_CFG_REV, sizeof (uint32));
-+			si->sb.chiprev = w & 0xff;
-+		} else
-+			si->sb.chiprev = 0;
-+	}
++obj-m		:= $(O_TARGET)
 +
-+	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
-+		w = getintvar(*vars, "regwindowsz");
-+		si->memseg = (w <= CFTABLE_REGWIN_2K) ? TRUE : FALSE;
-+	}
++include $(TOPDIR)/Rules.make
+diff -Naur linux.old/drivers/net/wl2/compat.c linux.dev/drivers/net/wl2/compat.c
+--- linux.old/drivers/net/wl2/compat.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/wl2/compat.c	2006-04-06 17:08:21.000000000 +0200
+@@ -0,0 +1,376 @@
++/*
++ * Misc useful OS-independent routines.
++ *
++ * Copyright 2005, Broadcom Corporation      
++ * All Rights Reserved.      
++ *       
++ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY      
++ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM      
++ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS      
++ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
++ * $Id$
++ */
 +
-+	/* gpio control core is required */
-+	if (!GOODIDX(si->gpioidx)) {
-+		SB_ERROR(("sb_doattach: gpio control core not found\n"));
-+		return NULL;
-+	}
++#include <typedefs.h>
++#ifdef BCMDRIVER
++#include <osl.h>
++#include <sbutils.h>
++#include <bcmnvram.h>
++#else
++#include <stdio.h>
++#include <string.h>
++#endif
++#include "pktq.h"
++#include <bcmutils.h>
++#include <bcmendian.h>
++#include <bcmdevs.h>
 +
-+	/* get boardtype and boardrev */
-+	switch (BUSTYPE(si->sb.bustype)) {
-+	case PCI_BUS:
-+		/* do a pci config read to get subsystem id and subvendor id */
-+		w = OSL_PCI_READ_CONFIG(osh, PCI_CFG_SVID, sizeof (uint32));
-+		si->sb.boardvendor = w & 0xffff;
-+		si->sb.boardtype = (w >> 16) & 0xffff;
-+		break;
++#ifdef BCMDRIVER
++/* copy a pkt buffer chain into a buffer */
++uint
++pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
++{
++	uint n, ret = 0;
 +
-+	case PCMCIA_BUS:
-+	case SDIO_BUS:
-+		si->sb.boardvendor = getintvar(*vars, "manfid");
-+		si->sb.boardtype = getintvar(*vars, "prodid");
-+		break;
++	if (len < 0)
++		len = 4096;	/* "infinite" */
 +
-+	case SB_BUS:
-+	case JTAG_BUS:
-+		si->sb.boardvendor = VENDOR_BROADCOM;
-+		if ((si->sb.boardtype = getintvar(NULL, "boardtype")) == 0)
-+			si->sb.boardtype = 0xffff;
-+		break;
++	/* skip 'offset' bytes */
++	for (; p && offset; p = PKTNEXT(osh, p)) {
++		if (offset < (uint)PKTLEN(osh, p))
++			break;
++		offset -= PKTLEN(osh, p);
 +	}
 +
-+	if (si->sb.boardtype == 0) {
-+		SB_ERROR(("sb_doattach: unknown board type\n"));
-+		ASSERT(si->sb.boardtype);
-+	}
++	if (!p)
++		return 0;
 +
-+	/* setup the GPIO based LED powersave register */
-+	if (si->sb.ccrev >= 16) {
-+		w = getintvar(*vars, "gpiotimerval");
-+		if (!w)
-+			w = DEFAULT_GPIOTIMERVAL; 
-+		sb_corereg(si, 0, OFFSETOF(chipcregs_t, gpiotimerval), ~0, w);
++	/* copy the data */
++	for (; p && len; p = PKTNEXT(osh, p)) {
++		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
++		bcopy(PKTDATA(osh, p) + offset, buf, n);
++		buf += n;
++		len -= n;
++		ret += n;
++		offset = 0;
 +	}
 +
-+
-+	return (si);
++	return ret;
 +}
 +
++/* return total length of buffer chain */
 +uint
-+sb_coreid(sb_t *sbh)
++pkttotlen(osl_t *osh, void *p)
 +{
-+	sb_info_t *si;
-+	sbconfig_t *sb;
-+
-+	si = SB_INFO(sbh);
-+	sb = REGS2SB(si->curmap);
++	uint total;
 +
-+	return ((R_SBREG(si, &(sb)->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
++	total = 0;
++	for (; p; p = PKTNEXT(osh, p))
++		total += PKTLEN(osh, p);
++	return (total);
 +}
 +
-+uint
-+sb_coreidx(sb_t *sbh)
++/*
++ * osl multiple-precedence packet queue
++ * hi_prec is always >= the number of the highest non-empty queue
++ */
++
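++/* enqueue a packet at the tail of the given precedence queue */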
++void *
++pktq_penq(struct pktq *pq, int prec, void *p)
 +{
-+	sb_info_t *si;
++	struct pktq_prec *q;
 +
-+	si = SB_INFO(sbh);
-+	return (si->curidx);
++	ASSERT(prec >= 0 && prec < pq->num_prec);
++	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
++
++	ASSERT(!pktq_full(pq));
++	ASSERT(!pktq_pfull(pq, prec));
++
++	q = &pq->q[prec];
++
++	if (q->head)
++		PKTSETLINK(q->tail, p);
++	else
++		q->head = p;
++
++	q->tail = p;
++	q->len++;
++
++	if (pq->hi_prec < prec)
++		pq->hi_prec = (uint8)prec;
++
++	pq->len++;
++
++	return p;
 +}
 +
-+/* return current index of core */
-+static uint
-+_sb_coreidx(sb_info_t *si)
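++/* enqueue a packet at the head of the given precedence queue */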
++void *
++pktq_penq_head(struct pktq *pq, int prec, void *p)
 +{
-+	sbconfig_t *sb;
-+	uint32 sbaddr = 0;
-+
-+	ASSERT(si);
++	struct pktq_prec *q;
 +
-+	switch (BUSTYPE(si->sb.bustype)) {
-+	case SB_BUS:
-+		sb = REGS2SB(si->curmap);
-+		sbaddr = sb_base(R_SBREG(si, &sb->sbadmatch0));
-+		break;
++	ASSERT(prec >= 0 && prec < pq->num_prec);
++	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
 +
-+	case PCI_BUS:
-+		sbaddr = OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof (uint32));
-+		break;
++	ASSERT(!pktq_full(pq));
++	ASSERT(!pktq_pfull(pq, prec));
 +
-+	case PCMCIA_BUS: {
-+		uint8 tmp = 0;
++	q = &pq->q[prec];
 +
-+		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
-+		sbaddr  = (uint)tmp << 12;
-+		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
-+		sbaddr |= (uint)tmp << 16;
-+		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
-+		sbaddr |= (uint)tmp << 24;
-+		break;
-+	}
++	if (q->head)
++		PKTSETLINK(p, q->head);
++	else
++		q->tail = p;
 +
-+#ifdef BCMJTAG
-+	case JTAG_BUS:
-+		sbaddr = (uint32)si->curmap;
-+		break;
-+#endif	/* BCMJTAG */
++	q->head = p;
++	q->len++;
 +
-+	default:
-+		ASSERT(0);
-+	}
++	if (pq->hi_prec < prec)
++		pq->hi_prec = (uint8)prec;
 +
-+	if (!GOODCOREADDR(sbaddr))
-+		return BADIDX;
++	pq->len++;
 +
-+	return ((sbaddr - SB_ENUM_BASE) / SB_CORE_SIZE);
++	return p;
 +}
 +
-+uint
-+sb_corevendor(sb_t *sbh)
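++/* dequeue a packet from the head of the given precedence queue */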
++void *
++pktq_pdeq(struct pktq *pq, int prec)
 +{
-+	sb_info_t *si;
-+	sbconfig_t *sb;
++	struct pktq_prec *q;
++	void *p;
 +
-+	si = SB_INFO(sbh);
-+	sb = REGS2SB(si->curmap);
++	ASSERT(prec >= 0 && prec < pq->num_prec);
 +
-+	return ((R_SBREG(si, &(sb)->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
-+}
++	q = &pq->q[prec];
 +
-+uint
-+sb_corerev(sb_t *sbh)
-+{
-+	sb_info_t *si;
-+	sbconfig_t *sb;
-+	uint sbidh;
++	if ((p = q->head) == NULL)
++		return NULL;
 +
-+	si = SB_INFO(sbh);
-+	sb = REGS2SB(si->curmap);
-+	sbidh = R_SBREG(si, &(sb)->sbidhigh);
++	if ((q->head = PKTLINK(p)) == NULL)
++		q->tail = NULL;
 +
-+	return (SBCOREREV(sbidh));
++	q->len--;
++
++	pq->len--;
++
++	PKTSETLINK(p, NULL);
++
++	return p;
 +}
 +
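++/* dequeue a packet from the tail of the given precedence queue */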
 +void *
-+sb_osh(sb_t *sbh)
++pktq_pdeq_tail(struct pktq *pq, int prec)
 +{
-+	sb_info_t *si;
++	struct pktq_prec *q;
++	void *p, *prev;
 +
-+	si = SB_INFO(sbh);
-+	return si->osh;
-+}
++	ASSERT(prec >= 0 && prec < pq->num_prec);
 +
-+#define	SBTML_ALLOW	(SBTML_PE | SBTML_FGC | SBTML_FL_MASK)
++	q = &pq->q[prec];
 +
-+/* set/clear sbtmstatelow core-specific flags */
-+uint32
-+sb_coreflags(sb_t *sbh, uint32 mask, uint32 val)
-+{
-+	sb_info_t *si;
-+	sbconfig_t *sb;
-+	uint32 w;
++	if ((p = q->head) == NULL)
++		return NULL;
 +
-+	si = SB_INFO(sbh);
-+	sb = REGS2SB(si->curmap);
++	for (prev = NULL; p != q->tail; p = PKTLINK(p))
++		prev = p;
 +
-+	ASSERT((val & ~mask) == 0);
-+	ASSERT((mask & ~SBTML_ALLOW) == 0);
++	if (prev)
++		PKTSETLINK(prev, NULL);
++	else
++		q->head = NULL;
 +
-+	/* mask and set */
-+	if (mask || val) {
-+		w = (R_SBREG(si, &sb->sbtmstatelow) & ~mask) | val;
-+		W_SBREG(si, &sb->sbtmstatelow, w);
-+	}
++	q->tail = prev;
++	q->len--;
 +
-+	/* return the new value */
-+	return (R_SBREG(si, &sb->sbtmstatelow) & SBTML_ALLOW);
++	pq->len--;
++
++	return p;
 +}
 +
-+/* set/clear sbtmstatehigh core-specific flags */
-+uint32
-+sb_coreflagshi(sb_t *sbh, uint32 mask, uint32 val)
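++/* initialize a packet queue; every precedence level shares the same max depth */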
++void
++pktq_init(struct pktq *pq, int num_prec, int max)
 +{
-+	sb_info_t *si;
-+	sbconfig_t *sb;
-+	uint32 w;
++	int prec;
 +
-+	si = SB_INFO(sbh);
-+	sb = REGS2SB(si->curmap);
++	ASSERT(num_prec >= 0 && num_prec <= PKTQ_MAX_PREC);
 +
-+	ASSERT((val & ~mask) == 0);
-+	ASSERT((mask & ~SBTMH_FL_MASK) == 0);
++	bzero(pq, sizeof (*pq));
 +
-+	/* mask and set */
-+	if (mask || val) {
-+		w = (R_SBREG(si, &sb->sbtmstatehigh) & ~mask) | val;
-+		W_SBREG(si, &sb->sbtmstatehigh, w);
-+	}
++	pq->num_prec = (uint16)num_prec;
 +
-+	/* return the new value */
-+	return (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_FL_MASK);
++	pq->max = (uint16)max;
++
++	for (prec = 0; prec < num_prec; prec++)
++		pq->q[prec].max = pq->max;
 +}
 +
-+/* caller needs to take care of core-specific bist hazards */
-+int
-+sb_corebist(sb_t *sbh, uint coreid, uint coreunit)
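++/* dequeue from the head of the highest-precedence non-empty queue */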
++void *
++pktq_deq(struct pktq *pq, int *prec_out)
 +{
-+	uint32 sblo;
-+	uint coreidx;
-+	sb_info_t *si;
-+	int result = 0;
++	struct pktq_prec *q;
++	void *p;
++	int prec;
 +
-+	si = SB_INFO(sbh);
++	if (pq->len == 0)
++		return NULL;
 +
-+	coreidx = sb_findcoreidx(si, coreid, coreunit);
-+	if (!GOODIDX(coreidx))
-+		result = BCME_ERROR;
-+	else {
-+		sblo = sb_corereg(si, coreidx, SBCONFIGOFF + OFFSETOF(sbconfig_t, sbtmstatelow), 0, 0);
-+		sb_corereg(si, coreidx, SBCONFIGOFF + OFFSETOF(sbconfig_t, sbtmstatelow), ~0, (sblo | SBTML_FGC | SBTML_BE));
-+		
-+		SPINWAIT(((sb_corereg(si, coreidx, SBCONFIGOFF + OFFSETOF(sbconfig_t, sbtmstatehigh), 0, 0) & SBTMH_BISTD) == 0), 100000);
-+	
-+		if (sb_corereg(si, coreidx, SBCONFIGOFF + OFFSETOF(sbconfig_t, sbtmstatehigh), 0, 0) & SBTMH_BISTF)
-+			result = BCME_ERROR;
++	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
++		pq->hi_prec--;
 +
-+		sb_corereg(si, coreidx, SBCONFIGOFF + OFFSETOF(sbconfig_t, sbtmstatelow), ~0, sblo);
-+	}
++	q = &pq->q[prec];
 +
-+	return result;
-+}
++	if ((p = q->head) == NULL)
++		return NULL;
 +
-+bool
-+sb_iscoreup(sb_t *sbh)
-+{
-+	sb_info_t *si;
-+	sbconfig_t *sb;
++	if ((q->head = PKTLINK(p)) == NULL)
++		q->tail = NULL;
 +
-+	si = SB_INFO(sbh);
-+	sb = REGS2SB(si->curmap);
++	q->len--;
 +
-+	return ((R_SBREG(si, &(sb)->sbtmstatelow) & (SBTML_RESET | SBTML_REJ_MASK | SBTML_CLK)) == SBTML_CLK);
++	if (prec_out)
++		*prec_out = prec;
++
++	pq->len--;
++
++	PKTSETLINK(p, NULL);
++
++	return p;
 +}
 +
-+/*
-+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
-+ * switch back to the original core, and return the new value.
-+ */
-+static uint
-+sb_corereg(sb_info_t *si, uint coreidx, uint regoff, uint mask, uint val)
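++/* dequeue from the tail of the lowest-precedence non-empty queue */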
++void *
++pktq_deq_tail(struct pktq *pq, int *prec_out)
 +{
-+	uint origidx;
-+	uint32 *r;
-+	uint w;
-+	uint intr_val = 0;
++	struct pktq_prec *q;
++	void *p, *prev;
++	int prec;
 +
-+	ASSERT(GOODIDX(coreidx));
-+	ASSERT(regoff < SB_CORE_SIZE);
-+	ASSERT((val & ~mask) == 0);
++	if (pq->len == 0)
++		return NULL;
 +
-+	INTR_OFF(si, intr_val);
++	for (prec = 0; prec < pq->hi_prec; prec++)
++		if (pq->q[prec].head)
++			break;
 +
-+	/* save current core index */
-+	origidx = sb_coreidx(&si->sb);
++	q = &pq->q[prec];
 +
-+	/* switch core */
-+	r = (uint32*) ((uchar*) sb_setcoreidx(&si->sb, coreidx) + regoff);
++	if ((p = q->head) == NULL)
++		return NULL;
 +
-+	/* mask and set */
-+	if (mask || val) {
-+		if (regoff >= SBCONFIGOFF) {
-+			w = (R_SBREG(si, r) & ~mask) | val;
-+			W_SBREG(si, r, w);
-+		} else {
-+			w = (R_REG(r) & ~mask) | val;
-+			W_REG(r, w);
-+		}
-+	}
++	for (prev = NULL; p != q->tail; p = PKTLINK(p))
++		prev = p;
 +
-+	/* readback */
-+	if (regoff >= SBCONFIGOFF)
-+		w = R_SBREG(si, r);
++	if (prev)
++		PKTSETLINK(prev, NULL);
 +	else
-+		w = R_REG(r);
++		q->head = NULL;
 +
-+	/* restore core index */
-+	if (origidx != coreidx)
-+		sb_setcoreidx(&si->sb, origidx);
++	q->tail = prev;
++	q->len--;
 +
-+	INTR_RESTORE(si, intr_val);
-+	return (w);
-+}
++	if (prec_out)
++		*prec_out = prec;
 +
-+#define DWORD_ALIGN(x)  (x & ~(0x03))
-+#define BYTE_POS(x) (x & 0x3)
-+#define WORD_POS(x) (x & 0x1)
++	pq->len--;
 +
-+#define BYTE_SHIFT(x)  (8 * BYTE_POS(x))
-+#define WORD_SHIFT(x)  (16 * WORD_POS(x))
++	PKTSETLINK(p, NULL);
 +
-+#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF)
-+#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF)
++	return p;
++}
 +
-+#define read_pci_cfg_byte(a) \
-+	(BYTE_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xff)
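++/* return (without removing) the head packet of the highest-precedence non-empty queue */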
++void *
++pktq_peek(struct pktq *pq, int *prec_out)
++{
++	void *p;
++	int prec;
 +
-+#define read_pci_cfg_write(a) \
-+	(WORD_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xffff)
++	if (pq->len == 0)
++		return NULL;
++
++	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
++		pq->hi_prec--;
++
++	if ((p = pq->q[prec].head) == NULL)
++		return NULL;
 +
++	if (prec_out)
++		*prec_out = prec;
 +
-+/* return TRUE if requested capability exists in the PCI config space */
-+static bool 
-+sb_find_pci_capability(sb_info_t *si, uint8 req_cap_id, uchar *buf, uint32 *buflen)
++	return p;
++}
++
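++/* return (without removing) the tail packet of the lowest-precedence non-empty queue */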
++void *
++pktq_peek_tail(struct pktq *pq, int *prec_out)
 +{
-+	uint8 cap_id;
-+	uint8 cap_ptr;
-+	uint32 	bufsize;
-+	uint8 byte_val;
++	void *p;
++	int prec;
 +
-+	if (BUSTYPE(si->sb.bustype) != PCI_BUS)
-+		return FALSE;
++	if (pq->len == 0)
++		return NULL;
 +
-+	/* check for Header type 0*/
-+	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
-+	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)
-+		return FALSE;
++	for (prec = 0; prec < pq->hi_prec; prec++)
++		if (pq->q[prec].head)
++			break;
 +
-+	/* check if the capability pointer field exists */
-+	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
-+	if (!(byte_val & PCI_CAPPTR_PRESENT))
-+		return FALSE;
++	if ((p = pq->q[prec].tail) == NULL)
++		return NULL;
 +
-+	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
-+	/* check if the capability pointer is 0x00 */
-+	if (cap_ptr == 0x00)
-+		return FALSE;
++	if (prec_out)
++		*prec_out = prec;
 +
++	return p;
++}
 +
-+	/* loop thr'u the capability list and see if the pcie capabilty exists */
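++/* return the number of packets queued at the precedence levels selected by prec_bmp */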
++int
++pktq_mlen(struct pktq *pq, uint prec_bmp)
++{
++	int prec, len;
 +
-+	cap_id = read_pci_cfg_byte(cap_ptr);
++	len = 0;
 +
-+	while (cap_id != req_cap_id) {
-+		cap_ptr = read_pci_cfg_byte((cap_ptr+1));
-+		if (cap_ptr == 0x00) break;
-+		cap_id = read_pci_cfg_byte(cap_ptr);
-+	}
-+	if (cap_id != req_cap_id) {
-+		return FALSE;
-+	}
-+	/* found the caller requested capability */
-+	if ((buf != NULL) &&  (buflen != NULL)) {
-+		bufsize = *buflen;
-+		if (!bufsize) goto end;
-+		*buflen = 0;
-+		/* copy the cpability data excluding cap ID and next ptr */
-+		cap_ptr += 2;
-+		if ((bufsize + cap_ptr)  > SZPCR)
-+			bufsize = SZPCR - cap_ptr;
-+		*buflen = bufsize;
-+		while (bufsize--) {
-+			*buf = read_pci_cfg_byte(cap_ptr);
-+			cap_ptr++;
-+			buf++;
-+		}
-+	}
-+end:
-+	return TRUE;
-+}
++	for (prec = 0; prec <= pq->hi_prec; prec++)
++		if (prec_bmp & (1 << prec))
++			len += pq->q[prec].len;
 +
-+/* return TRUE if PCIE capability exists the pci config space */
-+static bool
-+sb_ispcie(sb_info_t *si)
-+{
-+	return(sb_find_pci_capability(si, PCI_CAP_PCIECAP_ID, NULL, NULL));
++	return len;
 +}
 +
-+/* scan the sb enumerated space to identify all cores */
-+static void
-+BCMINITFN(sb_scan)(sb_info_t *si)
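++/* dequeue from the highest-precedence non-empty queue whose bit is set in prec_bmp */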
++void *
++pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
 +{
-+	uint origidx;
-+	uint i;
-+	bool pci;
-+	bool pcie;
-+	uint pciidx;
-+	uint pcieidx;
-+	uint pcirev;
-+	uint pcierev;
++	struct pktq_prec *q;
++	void *p;
++	int prec;
 +
++	if (pq->len == 0)
++		return NULL;
 +
++	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
++		pq->hi_prec--;
 +
-+	/* numcores should already be set */
-+	ASSERT((si->numcores > 0) && (si->numcores <= SB_MAXCORES));
++	while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
++		if (prec-- == 0)
++			return NULL;
 +
-+	/* save current core index */
-+	origidx = sb_coreidx(&si->sb);
++	q = &pq->q[prec];
 +
-+	si->sb.buscorerev = NOREV;
-+	si->sb.buscoreidx = BADIDX;
++	if ((p = q->head) == NULL)
++		return NULL;
 +
-+	si->gpioidx = BADIDX;
++	if ((q->head = PKTLINK(p)) == NULL)
++		q->tail = NULL;
 +
-+	pci = pcie = FALSE;
-+	pcirev = pcierev = NOREV;
-+	pciidx = pcieidx = BADIDX;
++	q->len--;
 +
-+	for (i = 0; i < si->numcores; i++) {
-+		sb_setcoreidx(&si->sb, i);
-+		si->coreid[i] = sb_coreid(&si->sb);
++	if (prec_out)
++		*prec_out = prec;
 +
-+		if (si->coreid[i] == SB_PCI) { 
-+			pciidx = i;
-+			pcirev = sb_corerev(&si->sb);
-+			pci = TRUE;
-+		} else if (si->coreid[i] == SB_PCIE) {
-+			pcieidx = i;
-+			pcierev = sb_corerev(&si->sb);
-+			pcie = TRUE;
-+		} else if (si->coreid[i] == SB_PCMCIA) {
-+			si->sb.buscorerev = sb_corerev(&si->sb);
-+			si->sb.buscoretype = si->coreid[i];
-+			si->sb.buscoreidx = i; 
-+		}
-+	}
-+	if (pci && pcie) {
-+		if (sb_ispcie(si))
-+			pci = FALSE;
-+		else
-+			pcie = FALSE;
-+	}
-+	if (pci) {
-+		si->sb.buscoretype = SB_PCI;
-+		si->sb.buscorerev = pcirev; 
-+		si->sb.buscoreidx = pciidx; 
-+	}
-+	else if (pcie) {
-+		si->sb.buscoretype = SB_PCIE;
-+		si->sb.buscorerev = pcierev; 
-+		si->sb.buscoreidx = pcieidx; 
-+	}
++	pq->len--;
 +
-+	/*
-+	 * Find the gpio "controlling core" type and index.
-+	 * Precedence:
-+	 * - if there's a chip common core - use that
-+	 * - else if there's a pci core (rev >= 2) - use that
-+	 * - else there had better be an extif core (4710 only)
-+	 */
-+	if (GOODIDX(sb_findcoreidx(si, SB_CC, 0))) {
-+		si->gpioidx = sb_findcoreidx(si, SB_CC, 0);
-+		si->gpioid = SB_CC;
-+	} else if (PCI(si) && (si->sb.buscorerev >= 2)) {
-+		si->gpioidx = si->sb.buscoreidx;
-+		si->gpioid = SB_PCI;
-+	} else if (sb_findcoreidx(si, SB_EXTIF, 0)) {
-+		si->gpioidx = sb_findcoreidx(si, SB_EXTIF, 0);
-+		si->gpioid = SB_EXTIF;
-+	} else
-+		ASSERT(si->gpioidx != BADIDX);
++	PKTSETLINK(p, NULL);
 +
-+	/* return to original core index */
-+	sb_setcoreidx(&si->sb, origidx);
++	return p;
 +}
 +
-+/* may be called with core in reset */
-+void
-+sb_detach(sb_t *sbh)
-+{
-+	sb_info_t *si;
-+	uint idx;
++#endif
 +
-+	si = SB_INFO(sbh);
 +
-+	if (si == NULL)
-+		return;
+diff -Naur linux.old/drivers/net/wl2/hnddma.c linux.dev/drivers/net/wl2/hnddma.c
+--- linux.old/drivers/net/wl2/hnddma.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/wl2/hnddma.c	2006-04-06 16:32:33.000000000 +0200
+@@ -0,0 +1,1487 @@
++/*
++ * Generic Broadcom Home Networking Division (HND) DMA module.
++ * This supports the following chips: BCM42xx, 44xx, 47xx .
++ *
++ * Copyright 2005, Broadcom Corporation
++ * All Rights Reserved.
++ * 
++ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
++ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
++ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
++ *
++ * $Id$
++ */
 +
-+	if (BUSTYPE(si->sb.bustype) == SB_BUS)
-+		for (idx = 0; idx < SB_MAXCORES; idx++)
-+			if (si->regs[idx]) {
-+				REG_UNMAP(si->regs[idx]);
-+				si->regs[idx] = NULL;
-+			}
++#include <typedefs.h>
++#include <osl.h>
++#include <bcmendian.h>
++#include <sbconfig.h>
++#include <bcmutils.h>
++#include <bcmdevs.h>
++#include <sbutils.h>
 +
-+	if (si != &ksi)
-+		MFREE(si->osh, si, sizeof (sb_info_t));
-+}
++struct dma_info;	/* forward declaration */
++#define di_t struct dma_info
 +
-+/* use pci dev id to determine chip id for chips not having a chipcommon core */
-+static uint
-+BCMINITFN(sb_pcidev2chip)(uint pcidev)
-+{
-+	if ((pcidev >= BCM4710_DEVICE_ID) && (pcidev <= BCM47XX_USB_ID))
-+		return (BCM4710_DEVICE_ID);
-+	if ((pcidev >= BCM4402_DEVICE_ID) && (pcidev <= BCM4402_V90_ID))
-+		return (BCM4402_DEVICE_ID);
-+	if (pcidev == BCM4401_ENET_ID)
-+		return (BCM4402_DEVICE_ID);
-+	if ((pcidev >= BCM4307_V90_ID) && (pcidev <= BCM4307_D11B_ID))
-+		return (BCM4307_DEVICE_ID);
-+	if (pcidev == BCM4301_DEVICE_ID)
-+		return (BCM4301_DEVICE_ID);
++#include "sbhnddma.h"
++#include "hnddma.h"
 +
-+	return (0);
-+}
++/* debug/trace */
++#define	DMA_ERROR(args)
++#define	DMA_TRACE(args)
 +
-+/* convert chip number to number of i/o cores */
-+static uint
-+BCMINITFN(sb_chip2numcores)(uint chip)
-+{
-+	if (chip == BCM4710_DEVICE_ID)
-+		return (9);
-+	if (chip == BCM4402_DEVICE_ID)
-+		return (3);
-+	if ((chip == BCM4301_DEVICE_ID) || (chip == BCM4307_DEVICE_ID))
-+		return (5);
-+	if (chip == BCM4306_DEVICE_ID)	/* < 4306c0 */
-+		return (6);
-+	if (chip == BCM4704_DEVICE_ID)
-+		return (9);
-+	if (chip == BCM5365_DEVICE_ID)
-+		return (7);
++/* default dma message level (if input msg_level pointer is null in dma_attach()) */
++static uint dma_msg_level =
++	0;
 +
-+	SB_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", chip));
-+	ASSERT(0);
-+	return (1);
-+}
++#define	MAXNAMEL	8
 +
-+/* return index of coreid or BADIDX if not found */
-+static uint
-+sb_findcoreidx( sb_info_t *si, uint coreid, uint coreunit)
-+{
-+	uint found;
-+	uint i;
++/* dma engine software state */
++typedef struct dma_info {
++	hnddma_t	hnddma;		/* exported structure */
++	uint		*msg_level;	/* message level pointer */
++	char		name[MAXNAMEL];	/* caller's name for diag msgs */
++	
++	void		*osh;		/* os handle */
++	sb_t		*sbh;		/* sb handle */
++	
++	bool		dma64;		/* dma64 enabled */
++	bool		addrext;	/* this dma engine supports DmaExtendedAddrChanges */
++	
++	dma32regs_t	*d32txregs;	/* 32 bits dma tx engine registers */
++	dma32regs_t	*d32rxregs;	/* 32 bits dma rx engine registers */
++	dma64regs_t	*d64txregs;	/* 64 bits dma tx engine registers */
++	dma64regs_t	*d64rxregs;	/* 64 bits dma rx engine registers */
 +
-+	found = 0;
++	uint32		dma64align;	/* either 8k or 4k depends on number of dd */
++	dma32dd_t	*txd32;		/* pointer to dma32 tx descriptor ring */
++	dma64dd_t	*txd64;		/* pointer to dma64 tx descriptor ring */
++	uint		ntxd;		/* # tx descriptors tunable */	
++	uint		txin;		/* index of next descriptor to reclaim */
++	uint		txout;		/* index of next descriptor to post */
++	uint		txavail;	/* # free tx descriptors */
++	void		**txp;		/* pointer to parallel array of pointers to packets */
++	ulong		txdpa;		/* physical address of descriptor ring */
++	uint		txdalign;	/* #bytes added to alloc'd mem to align txd */
++	uint		txdalloc;	/* #bytes allocated for the ring */
 +
-+	for (i = 0; i < si->numcores; i++)
-+		if (si->coreid[i] == coreid) {
-+			if (found == coreunit)
-+				return (i);
-+			found++;
-+		}
++	dma32dd_t	*rxd32;		/* pointer to dma32 rx descriptor ring */
++	dma64dd_t	*rxd64;		/* pointer to dma64 rx descriptor ring */
++	uint		nrxd;		/* # rx descriptors tunable */	
++	uint		rxin;		/* index of next descriptor to reclaim */
++	uint		rxout;		/* index of next descriptor to post */
++	void		**rxp;		/* pointer to parallel array of pointers to packets */
++	ulong		rxdpa;		/* physical address of descriptor ring */
++	uint		rxdalign;	/* #bytes added to alloc'd mem to align rxd */
++	uint		rxdalloc;	/* #bytes allocated for the ring */
 +
-+	return (BADIDX);
-+}
++	/* tunables */
++	uint		rxbufsize;	/* rx buffer size in bytes */
++	uint		nrxpost;	/* # rx buffers to keep posted */
++	uint		rxoffset;	/* rxcontrol offset */
++	uint		ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
++	uint		ddoffsethigh;	/* add to get dma address of descriptor ring, high 32 bits */
++	uint		dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
++	uint		dataoffsethigh;	/* add to get dma address of data buffer, high 32 bits */
++} dma_info_t;
 +
-+/* 
-+ * this function changes logical "focus" to the indiciated core, 
-+ * must be called with interrupt off.
-+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
-+ */
-+void*
-+sb_setcoreidx(sb_t *sbh, uint coreidx)
-+{
-+	sb_info_t *si;
-+	uint32 sbaddr;
-+	uint8 tmp;
++#ifdef BCMDMA64
++#define	DMA64_ENAB(di)	((di)->dma64)
++#else
++#define	DMA64_ENAB(di)	(0)
++#endif
 +
-+	si = SB_INFO(sbh);
++/* descriptor bumping macros */
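++/* ring sizes must be powers of 2 (asserted in dma_attach) so the mask wraps the index */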
++#define	XXD(x, n)	((x) & ((n) - 1))
++#define	TXD(x)		XXD((x), di->ntxd)
++#define	RXD(x)		XXD((x), di->nrxd)
++#define	NEXTTXD(i)	TXD(i + 1)
++#define	PREVTXD(i)	TXD(i - 1)
++#define	NEXTRXD(i)	RXD(i + 1)
++#define	NTXDACTIVE(h, t)	TXD(t - h)
++#define	NRXDACTIVE(h, t)	RXD(t - h)
 +
-+	if (coreidx >= si->numcores)
-+		return (NULL);
-+	
-+	/*
-+	 * If the user has provided an interrupt mask enabled function,
-+	 * then assert interrupts are disabled before switching the core.
-+	 */
-+	ASSERT((si->intrsenabled_fn == NULL) || !(*(si)->intrsenabled_fn)((si)->intr_arg));
++/* macros to convert between byte offsets and indexes */
++#define	B2I(bytes, type)	((bytes) / sizeof(type))
++#define	I2B(index, type)	((index) * sizeof(type))
 +
-+	sbaddr = SB_ENUM_BASE + (coreidx * SB_CORE_SIZE);
++#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
++#define	PCI32ADDR_HIGH_SHIFT	30
 +
-+	switch (BUSTYPE(si->sb.bustype)) {
-+	case SB_BUS:
-+		/* map new one */
-+		if (!si->regs[coreidx]) {
-+			si->regs[coreidx] = (void*)REG_MAP(sbaddr, SB_CORE_SIZE);
-+			ASSERT(GOODREGS(si->regs[coreidx]));
-+		}
-+		si->curmap = si->regs[coreidx];
-+		break;
 +
-+	case PCI_BUS:
-+		/* point bar0 window */
-+		OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, 4, sbaddr);
-+		break;
++/* prototypes */
++static bool dma_isaddrext(dma_info_t *di);
++static bool dma_alloc(dma_info_t *di, uint direction);
 +
-+	case PCMCIA_BUS:
-+		tmp = (sbaddr >> 12) & 0x0f;
-+		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
-+		tmp = (sbaddr >> 16) & 0xff;
-+		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
-+		tmp = (sbaddr >> 24) & 0xff;
-+		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
-+		break;
-+#ifdef BCMJTAG
-+	case JTAG_BUS:
-+		/* map new one */
-+		if (!si->regs[coreidx]) {
-+			si->regs[coreidx] = (void *)sbaddr;
-+			ASSERT(GOODREGS(si->regs[coreidx]));
-+		}
-+		si->curmap = si->regs[coreidx];
-+		break;
-+#endif	/* BCMJTAG */
-+	}
++static bool dma32_alloc(dma_info_t *di, uint direction);
++static void dma32_txreset(dma_info_t *di);
++static void dma32_rxreset(dma_info_t *di);
++static bool dma32_txsuspendedidle(dma_info_t *di);
++static int  dma32_txfast(dma_info_t *di, void *p0, uint32 coreflags);
++static void* dma32_getnexttxp(dma_info_t *di, bool forceall);
++static void* dma32_getnextrxp(dma_info_t *di, bool forceall);
++static void dma32_txrotate(di_t *di);
 +
-+	si->curidx = coreidx;
++/* prototypes or stubs */
++#ifdef BCMDMA64
++static bool dma64_alloc(dma_info_t *di, uint direction);
++static void dma64_txreset(dma_info_t *di);
++static void dma64_rxreset(dma_info_t *di);
++static bool dma64_txsuspendedidle(dma_info_t *di);
++static int  dma64_txfast(dma_info_t *di, void *p0, uint32 coreflags);
++static void* dma64_getnexttxp(dma_info_t *di, bool forceall);
++static void* dma64_getnextrxp(dma_info_t *di, bool forceall);
++static void dma64_txrotate(di_t *di);
++#else
++static bool dma64_alloc(dma_info_t *di, uint direction) { return TRUE; }
++static void dma64_txreset(dma_info_t *di) {}
++static void dma64_rxreset(dma_info_t *di) {}
++static bool dma64_txsuspendedidle(dma_info_t *di) { return TRUE;}
++static int  dma64_txfast(dma_info_t *di, void *p0, uint32 coreflags) { return 0; }
++static void* dma64_getnexttxp(dma_info_t *di, bool forceall) { return NULL; }
++static void* dma64_getnextrxp(dma_info_t *di, bool forceall) { return NULL; }
++static void dma64_txrotate(di_t *di) { return; }
++#endif
 +
-+	return (si->curmap);
-+}
 +
-+/* 
-+ * this function changes logical "focus" to the indiciated core, 
-+ * must be called with interrupt off.
-+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
-+ */
-+void*
-+sb_setcore(sb_t *sbh, uint coreid, uint coreunit)
++
++void* 
++dma_attach(osl_t *osh, char *name, sb_t *sbh, void *dmaregstx, void *dmaregsrx,
++	   uint ntxd, uint nrxd, uint rxbufsize, uint nrxpost, uint rxoffset, uint *msg_level)
 +{
-+	sb_info_t *si;
-+	uint idx;
++	dma_info_t *di;
++	uint size;
 +
-+	si = SB_INFO(sbh);
-+	idx = sb_findcoreidx(si, coreid, coreunit);
-+	if (!GOODIDX(idx))
++	/* allocate private info structure */
++	if ((di = MALLOC(osh, sizeof (dma_info_t))) == NULL) {
 +		return (NULL);
++	}
++	bzero((char*)di, sizeof (dma_info_t));
 +
-+	return (sb_setcoreidx(sbh, idx));
-+}
++	di->msg_level = msg_level ? msg_level : &dma_msg_level;
 +
-+/* return chip number */
-+uint
-+BCMINITFN(sb_chip)(sb_t *sbh)
-+{
-+	sb_info_t *si;
++	if (sbh != NULL)
++		di->dma64 = ((sb_coreflagshi(sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64);
 +
-+	si = SB_INFO(sbh);
-+	return (si->sb.chip);
-+}
++#ifndef BCMDMA64
++	if (di->dma64) {
++		DMA_ERROR(("dma_attach: driver doesn't have the capability to support 64 bits DMA\n"));
++		goto fail;
++	}
++#endif
++	
++	/* check arguments */
++	ASSERT(ISPOWEROF2(ntxd));
++	ASSERT(ISPOWEROF2(nrxd));
++	if (nrxd == 0)
++		ASSERT(dmaregsrx == NULL);
++	if (ntxd == 0)
++		ASSERT(dmaregstx == NULL);
 +
-+/* return chip revision number */
-+uint
-+BCMINITFN(sb_chiprev)(sb_t *sbh)
-+{
-+	sb_info_t *si;
 +
-+	si = SB_INFO(sbh);
-+	return (si->sb.chiprev);
-+}
++	/* init dma reg pointer */
++	if (di->dma64) {
++		ASSERT(ntxd <= D64MAXDD);
++		ASSERT(nrxd <= D64MAXDD);
++		di->d64txregs = (dma64regs_t *)dmaregstx;
++		di->d64rxregs = (dma64regs_t *)dmaregsrx;
 +
-+/* return chip common revision number */
-+uint
-+BCMINITFN(sb_chipcrev)(sb_t *sbh)
-+{
-+	sb_info_t *si;
++		di->dma64align = D64RINGALIGN;
++		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
++			/* for smaller dd tables, the HW relaxes the alignment requirement */
++			di->dma64align = D64RINGALIGN / 2;
++		}
++	} else {
++		ASSERT(ntxd <= D32MAXDD);
++		ASSERT(nrxd <= D32MAXDD);
++		di->d32txregs = (dma32regs_t *)dmaregstx;
++		di->d32rxregs = (dma32regs_t *)dmaregsrx;
++	}
 +
-+	si = SB_INFO(sbh);
-+	return (si->sb.ccrev);
-+}
 +
-+/* return chip package option */
-+uint
-+BCMINITFN(sb_chippkg)(sb_t *sbh)
-+{
-+	sb_info_t *si;
++	/* make a private copy of our caller's name */
++	strncpy(di->name, name, MAXNAMEL);
++	di->name[MAXNAMEL-1] = '\0';
++
++	di->osh = osh;
++	di->sbh = sbh;
++
++	/* save tunables */
++	di->ntxd = ntxd;
++	di->nrxd = nrxd;
++	di->rxbufsize = rxbufsize;
++	di->nrxpost = nrxpost;
++	di->rxoffset = rxoffset;
++
++	/* 
++	 * figure out the DMA physical address offset for dd and data 
++	 *   for old chips w/o sb, use zero
++	 *   for new chips w sb, 
++ *     PCI/PCIE: they map silicon backplane address to zero based memory, need offset
++	 *     Other bus: use zero
++	 *     SB_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
++	 */
++	di->ddoffsetlow = 0;
++	di->dataoffsetlow = 0;
++	if (sbh != NULL) {	
++		if (sbh->bustype == PCI_BUS) {  /* for pci bus, add offset */
++			if ((sbh->buscoretype == SB_PCIE) && di->dma64){
++				di->ddoffsetlow = 0;
++				di->ddoffsethigh = SB_PCIE_DMA_H32;
++			} else {
++				di->ddoffsetlow = SB_PCI_DMA;
++				di->ddoffsethigh = 0;
++			}
++			di->dataoffsetlow =  di->ddoffsetlow;
++			di->dataoffsethigh =  di->ddoffsethigh;
++		} 
++#if defined(__mips__) && defined(IL_BIGENDIAN)
++		/* use sdram swapped region for data buffers but not dma descriptors */
++		di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
++#endif
++	}
 +
-+	si = SB_INFO(sbh);
-+	return (si->sb.chippkg);
-+}
++	di->addrext = dma_isaddrext(di);
 +
-+/* return PCI core rev. */
-+uint
-+BCMINITFN(sb_pcirev)(sb_t *sbh)
-+{
-+	sb_info_t *si;
++	DMA_TRACE(("%s: dma_attach: osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d rxoffset %d ddoffset 0x%x dataoffset 0x%x\n", 
++		   name, osh, ntxd, nrxd, rxbufsize, nrxpost, rxoffset, di->ddoffsetlow, di->dataoffsetlow));
 +
-+	si = SB_INFO(sbh);
-+	return (si->sb.buscorerev);
-+}
++	/* allocate tx packet pointer vector */
++	if (ntxd) {
++		size = ntxd * sizeof (void*);
++		if ((di->txp = MALLOC(osh, size)) == NULL) {
++			DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
++			goto fail;
++		}
++		bzero((char*)di->txp, size);
++	}
 +
-+bool
-+BCMINITFN(sb_war16165)(sb_t *sbh)
-+{
-+	sb_info_t *si;
++	/* allocate rx packet pointer vector */
++	if (nrxd) {
++		size = nrxd * sizeof (void*);
++		if ((di->rxp = MALLOC(osh, size)) == NULL) {
++			DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
++			goto fail;
++		}
++		bzero((char*)di->rxp, size);
++	} 
 +
-+	si = SB_INFO(sbh);
++	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
++	if (ntxd) {
++		if (!dma_alloc(di, DMA_TX))
++			goto fail;
++	}
 +
-+	return (PCI(si) && (si->sb.buscorerev <= 10));
-+}
++	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
++	if (nrxd) {
++		if (!dma_alloc(di, DMA_RX))
++			goto fail;
++	}
 +
-+static void 
-+BCMINITFN(sb_war30841)(sb_info_t *si)
-+{
-+	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
-+	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
-+	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
-+}
++	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ) && !di->addrext) {
++		DMA_ERROR(("%s: dma_attach: txdpa 0x%lx: addrext not supported\n", di->name, di->txdpa));
++		goto fail;
++	}
++	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ) && !di->addrext) {
++		DMA_ERROR(("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n", di->name, di->rxdpa));
++		goto fail;
++	}
 +
-+/* return PCMCIA core rev. */
-+uint
-+BCMINITFN(sb_pcmciarev)(sb_t *sbh)
-+{
-+	sb_info_t *si;
++	return ((void*)di);
 +
-+	si = SB_INFO(sbh);
-+	return (si->sb.buscorerev);
++fail:
++	dma_detach((void*)di);
++	return (NULL);
 +}
 +
-+/* return board vendor id */
-+uint
-+BCMINITFN(sb_boardvendor)(sb_t *sbh)
++static bool
++dma_alloc(dma_info_t *di, uint direction)
 +{
-+	sb_info_t *si;
-+
-+	si = SB_INFO(sbh);
-+	return (si->sb.boardvendor);
++	if (DMA64_ENAB(di)) {
++		return dma64_alloc(di, direction);
++	} else {
++		return dma32_alloc(di, direction);
++	}
 +}
 +
-+/* return boardtype */
-+uint
-+BCMINITFN(sb_boardtype)(sb_t *sbh)
++/* may be called with core in reset */
++void
++dma_detach(dma_info_t *di)
 +{
-+	sb_info_t *si;
-+	char *var;
-+
-+	si = SB_INFO(sbh);
++	if (di == NULL)
++		return;
 +
-+	if (BUSTYPE(si->sb.bustype) == SB_BUS && si->sb.boardtype == 0xffff) {
-+		/* boardtype format is a hex string */
-+		si->sb.boardtype = getintvar(NULL, "boardtype");
++	DMA_TRACE(("%s: dma_detach\n", di->name));
 +
-+		/* backward compatibility for older boardtype string format */
-+		if ((si->sb.boardtype == 0) && (var = getvar(NULL, "boardtype"))) {
-+			if (!strcmp(var, "bcm94710dev"))
-+				si->sb.boardtype = BCM94710D_BOARD;
-+			else if (!strcmp(var, "bcm94710ap"))
-+				si->sb.boardtype = BCM94710AP_BOARD;
-+			else if (!strcmp(var, "bu4710"))
-+				si->sb.boardtype = BU4710_BOARD;
-+			else if (!strcmp(var, "bcm94702mn"))
-+				si->sb.boardtype = BCM94702MN_BOARD;
-+			else if (!strcmp(var, "bcm94710r1"))
-+				si->sb.boardtype = BCM94710R1_BOARD;
-+			else if (!strcmp(var, "bcm94710r4"))
-+				si->sb.boardtype = BCM94710R4_BOARD;
-+			else if (!strcmp(var, "bcm94702cpci"))
-+				si->sb.boardtype = BCM94702CPCI_BOARD;
-+			else if (!strcmp(var, "bcm95380_rr"))
-+				si->sb.boardtype = BCM95380RR_BOARD;
-+		}
-+	}
++	/* shouldn't be here if descriptors are unreclaimed */
++	ASSERT(di->txin == di->txout);
++	ASSERT(di->rxin == di->rxout);
 +
-+	return (si->sb.boardtype);
-+}
++	/* free dma descriptor rings */
++	if (di->txd32)
++		DMA_FREE_CONSISTENT(di->osh, ((int8*)di->txd32 - di->txdalign), di->txdalloc, (di->txdpa - di->txdalign));
++	if (di->rxd32)
++		DMA_FREE_CONSISTENT(di->osh, ((int8*)di->rxd32 - di->rxdalign), di->rxdalloc, (di->rxdpa - di->rxdalign));
 +
-+/* return bus type of sbh device */
-+uint
-+sb_bus(sb_t *sbh)
-+{
-+	sb_info_t *si;
++	/* free packet pointer vectors */
++	if (di->txp)
++		MFREE(di->osh, (void*)di->txp, (di->ntxd * sizeof (void*)));
++	if (di->rxp)
++		MFREE(di->osh, (void*)di->rxp, (di->nrxd * sizeof (void*)));
 +
-+	si = SB_INFO(sbh);
-+	return (si->sb.bustype);
++	/* free our private info structure */
++	MFREE(di->osh, (void*)di, sizeof (dma_info_t));
 +}
 +
-+/* return bus core type */
-+uint
-+sb_buscoretype(sb_t *sbh)
++/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
++static bool
++dma_isaddrext(dma_info_t *di)
 +{
-+	sb_info_t *si;
-+
-+	si = SB_INFO(sbh);
++	uint32 w;
 +
-+	return (si->sb.buscoretype);
++	if (DMA64_ENAB(di)) {
++		OR_REG(&di->d64txregs->control, D64_XC_AE);
++		w = R_REG(&di->d64txregs->control);
++		AND_REG(&di->d64txregs->control, ~D64_XC_AE);
++		return ((w & D64_XC_AE) == D64_XC_AE);
++	} else {
++		OR_REG(&di->d32txregs->control, XC_AE);
++		w = R_REG(&di->d32txregs->control);
++		AND_REG(&di->d32txregs->control, ~XC_AE);
++		return ((w & XC_AE) == XC_AE);
++	}
 +}
 +
-+/* return bus core revision */
-+uint
-+sb_buscorerev(sb_t *sbh)
++void
++dma_txreset(dma_info_t *di)
 +{
-+	sb_info_t *si;
-+	si = SB_INFO(sbh);
++	DMA_TRACE(("%s: dma_txreset\n", di->name));
 +
-+	return (si->sb.buscorerev);
++	if (DMA64_ENAB(di))
++		dma64_txreset(di);
++	else
++		dma32_txreset(di);
 +}
 +
-+/* return list of found cores */
-+uint
-+sb_corelist(sb_t *sbh, uint coreid[])
++void
++dma_rxreset(dma_info_t *di)
 +{
-+	sb_info_t *si;
-+
-+	si = SB_INFO(sbh);
++	DMA_TRACE(("%s: dma_rxreset\n", di->name));
 +
-+	bcopy((uchar*)si->coreid, (uchar*)coreid, (si->numcores * sizeof (uint)));
-+	return (si->numcores);
++	if (DMA64_ENAB(di))
++		dma64_rxreset(di);
++	else
++		dma32_rxreset(di);
 +}
 +
-+/* return current register mapping */
-+void *
-+sb_coreregs(sb_t *sbh)
++/* initialize descriptor table base address */
++static void
++dma_ddtable_init(dma_info_t *di, uint direction, ulong pa)
 +{
-+	sb_info_t *si;
-+
-+	si = SB_INFO(sbh);
-+	ASSERT(GOODREGS(si->curmap));
-+
-+	return (si->curmap);
++	if (DMA64_ENAB(di)) {
++		if (direction == DMA_TX) {
++			W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
++			W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
++		} else {
++			W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
++			W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
++		}
++	} else {
++		uint32 offset = di->ddoffsetlow;
++		if ((offset != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
++			if (direction == DMA_TX)	
++				W_REG(&di->d32txregs->addr, (pa + offset));
++			else
++				W_REG(&di->d32rxregs->addr, (pa + offset));
++		} else {        
++			/* dma32 address extension */
++			uint32 ae;
++			ASSERT(di->addrext);
++			ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
++	
++			if (direction == DMA_TX) {
++				W_REG(&di->d32txregs->addr, ((pa & ~PCI32ADDR_HIGH) + offset));
++				SET_REG(&di->d32txregs->control, XC_AE, (ae << XC_AE_SHIFT));
++			} else {
++				W_REG(&di->d32rxregs->addr, ((pa & ~PCI32ADDR_HIGH) + offset));
++				SET_REG(&di->d32rxregs->control, RC_AE, (ae << RC_AE_SHIFT));
++			}
++		}
++	}
 +}
 +
-+
-+/* do buffered registers update */
-+void
-+sb_commit(sb_t *sbh)
++/* init the tx or rx descriptor */
++static INLINE void
++dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, ulong pa, uint outidx, uint32 *ctrl)
 +{
-+	sb_info_t *si;
-+	uint origidx;
-+	uint intr_val = 0;
-+
-+	si = SB_INFO(sbh);
-+
-+	origidx = si->curidx;
-+	ASSERT(GOODIDX(origidx));
++	uint offset = di->dataoffsetlow;
 +
-+	INTR_OFF(si, intr_val);
++	if ((offset != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
++		W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + offset));
++		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*ctrl));
++	} else {        
++		/* address extension */
++		uint32 ae;
++		ASSERT(di->addrext);
++		ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
 +
-+	/* switch over to chipcommon core if there is one, else use pci */
-+	if (si->sb.ccrev != NOREV) {
-+		chipcregs_t *ccregs = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);
++		*ctrl |= (ae << CTRL_AE_SHIFT);
++		W_SM(&ddring[outidx].addr, BUS_SWAP32((pa & ~PCI32ADDR_HIGH) + offset));
++		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*ctrl));
++	}
++}
 +
-+		/* do the buffer registers update */
-+		W_REG(&ccregs->broadcastaddress, SB_COMMIT);
-+		W_REG(&ccregs->broadcastdata, 0x0);
-+	} else if (PCI(si)) {
-+		sbpciregs_t *pciregs = (sbpciregs_t *)sb_setcore(sbh, SB_PCI, 0);
++/* init the tx or rx descriptor */
++static INLINE void
++dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, ulong pa, uint outidx, uint32 *flags, uint32 bufcount)
++{
++	uint32 bufaddr_low = pa + di->dataoffsetlow;
++	uint32 bufaddr_high = 0 + di->dataoffsethigh;
 +
-+		/* do the buffer registers update */
-+		W_REG(&pciregs->bcastaddr, SB_COMMIT);
-+		W_REG(&pciregs->bcastdata, 0x0);
-+	} else
-+		ASSERT(0);
++	uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
 +
-+	/* restore core index */
-+	sb_setcoreidx(sbh, origidx);
-+	INTR_RESTORE(si, intr_val);
++	W_SM(&ddring[outidx].addrlow, BUS_SWAP32(bufaddr_low));
++	W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(bufaddr_high));
++	W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
++	W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
 +}
 +
-+/* reset and re-enable a core */
 +void
-+sb_core_reset(sb_t *sbh, uint32 bits)
++dma_txinit(dma_info_t *di)
 +{
-+	sb_info_t *si;
-+	sbconfig_t *sb;
-+	volatile uint32 dummy;
-+
-+	si = SB_INFO(sbh);
-+	ASSERT(GOODREGS(si->curmap));
-+	sb = REGS2SB(si->curmap);
-+
-+	/*
-+	 * Must do the disable sequence first to work for arbitrary current core state.
-+	 */
-+	sb_core_disable(sbh, bits);
-+
-+	/*
-+	 * Now do the initialization sequence.
-+	 */
++	DMA_TRACE(("%s: dma_txinit\n", di->name));
 +
-+	/* set reset while enabling the clock and forcing them on throughout the core */
-+	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | SBTML_RESET | bits));
-+	dummy = R_SBREG(si, &sb->sbtmstatelow);
-+	OSL_DELAY(1);
++	di->txin = di->txout = 0;
++	di->txavail = di->ntxd - 1;
 +
-+	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_SERR) {
-+		W_SBREG(si, &sb->sbtmstatehigh, 0);
-+	}
-+	if ((dummy = R_SBREG(si, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
-+		AND_SBREG(si, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
++	/* clear tx descriptor ring */
++	if (DMA64_ENAB(di)) {
++		BZERO_SM((void*)di->txd64, (di->ntxd * sizeof (dma64dd_t)));
++		W_REG(&di->d64txregs->control, D64_XC_XE);
++		dma_ddtable_init(di, DMA_TX, di->txdpa);
++	} else {
++		BZERO_SM((void*)di->txd32, (di->ntxd * sizeof (dma32dd_t)));
++		W_REG(&di->d32txregs->control, XC_XE);
++		dma_ddtable_init(di, DMA_TX, di->txdpa);
 +	}
-+
-+	/* clear reset and allow it to propagate throughout the core */
-+	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | bits));
-+	dummy = R_SBREG(si, &sb->sbtmstatelow);
-+	OSL_DELAY(1);
-+
-+	/* leave clock enabled */
-+	W_SBREG(si, &sb->sbtmstatelow, (SBTML_CLK | bits));
-+	dummy = R_SBREG(si, &sb->sbtmstatelow);
-+	OSL_DELAY(1);
 +}
 +
-+void
-+sb_core_tofixup(sb_t *sbh)
++bool
++dma_txenabled(dma_info_t *di)
 +{
-+	sb_info_t *si;
-+	sbconfig_t *sb;
-+
-+	si = SB_INFO(sbh);
-+
-+	if ( (BUSTYPE(si->sb.bustype) != PCI_BUS) || PCIE(si) || (PCI(si) && (si->sb.buscorerev >= 5)) )
-+		return;
-+
-+	ASSERT(GOODREGS(si->curmap));
-+	sb = REGS2SB(si->curmap);
-+
-+	if (BUSTYPE(si->sb.bustype) == SB_BUS) {
-+		SET_SBREG(si, &sb->sbimconfiglow,
-+			  SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
-+			  (0x5 << SBIMCL_RTO_SHIFT) | 0x3);
++	uint32 xc;
++	
++	/* If the chip is dead, it is not enabled :-) */
++	if (DMA64_ENAB(di)) {
++		xc = R_REG(&di->d64txregs->control);
++		return ((xc != 0xffffffff) && (xc & D64_XC_XE));
 +	} else {
-+		if (sb_coreid(sbh) == SB_PCI) {
-+			SET_SBREG(si, &sb->sbimconfiglow,
-+				  SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
-+				  (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
-+		} else {
-+			SET_SBREG(si, &sb->sbimconfiglow, (SBIMCL_RTO_MASK | SBIMCL_STO_MASK), 0);
-+		}
++		xc = R_REG(&di->d32txregs->control);
++		return ((xc != 0xffffffff) && (xc & XC_XE));
 +	}
-+
-+	sb_commit(sbh);
 +}
 +
-+/*
-+ * Set the initiator timeout for the "master core".
-+ * The master core is defined to be the core in control
-+ * of the chip and so it issues accesses to non-memory
-+ * locations (Because of dma *any* core can access memeory).
-+ *
-+ * The routine uses the bus to decide who is the master:
-+ *	SB_BUS => mips
-+ *	JTAG_BUS => chipc
-+ *	PCI_BUS => pci or pcie
-+ *	PCMCIA_BUS => pcmcia
-+ *	SDIO_BUS => pcmcia
-+ *
-+ * This routine exists so callers can disable initiator
-+ * timeouts so accesses to very slow devices like otp
-+ * won't cause an abort. The routine allows arbitrary
-+ * settings of the service and request timeouts, though.
-+ *
-+ * Returns the timeout state before changing it or -1
-+ * on error.
-+ */
-+
-+#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
-+
-+uint32
-+sb_set_initiator_to(sb_t *sbh, uint32 to)
++void
++dma_txsuspend(dma_info_t *di)
 +{
-+	sb_info_t *si;
-+	uint origidx, idx;
-+	uint intr_val = 0;
-+	uint32 tmp, ret = 0xffffffff;
-+	sbconfig_t *sb;
-+
-+	si = SB_INFO(sbh);
-+
-+	if ((to & ~TO_MASK) != 0)
-+		return ret;
++	DMA_TRACE(("%s: dma_txsuspend\n", di->name));
++	if (DMA64_ENAB(di))
++		OR_REG(&di->d64txregs->control, D64_XC_SE);
++	else
++		OR_REG(&di->d32txregs->control, XC_SE);
++}
 +
-+	/* Figure out the master core */
-+	idx = BADIDX;
-+	switch (BUSTYPE(si->sb.bustype)) {
-+	case PCI_BUS:
-+		idx = si->sb.buscoreidx; 
-+		break;
-+	case JTAG_BUS:
-+		idx = SB_CC_IDX;
-+		break;
-+	case PCMCIA_BUS:
-+	case SDIO_BUS:
-+		idx = sb_findcoreidx(si, SB_PCMCIA, 0);
-+		break;
-+	case SB_BUS:
-+		if ((idx = sb_findcoreidx(si, SB_MIPS33, 0)) == BADIDX)
-+			idx = sb_findcoreidx(si, SB_MIPS, 0);
-+		break;
-+	default:
-+		ASSERT(0);
-+	}
-+	if (idx == BADIDX)
-+		return ret;
++void
++dma_txresume(dma_info_t *di)
++{
++	DMA_TRACE(("%s: dma_txresume\n", di->name));
++	if (DMA64_ENAB(di))
++		AND_REG(&di->d64txregs->control, ~D64_XC_SE);
++	else
++		AND_REG(&di->d32txregs->control, ~XC_SE);
++}
 +
-+	INTR_OFF(si, intr_val);
-+	origidx = sb_coreidx(sbh);
++bool
++dma_txsuspendedidle(dma_info_t *di)
++{
++	if (DMA64_ENAB(di))
++		return dma64_txsuspendedidle(di);
++	else
++		return dma32_txsuspendedidle(di);
++}
 +
-+	sb = REGS2SB(sb_setcoreidx(sbh, idx));
++bool
++dma_txsuspended(dma_info_t *di)
++{
++	if (DMA64_ENAB(di))
++		return ((R_REG(&di->d64txregs->control) & D64_XC_SE) == D64_XC_SE);
++	else
++		return ((R_REG(&di->d32txregs->control) & XC_SE) == XC_SE);
++}
 +
-+	tmp = R_SBREG(si, &sb->sbimconfiglow);
-+	ret = tmp & TO_MASK;
-+	W_SBREG(si, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
++bool
++dma_txstopped(dma_info_t *di)
++{
++	if (DMA64_ENAB(di))
++		return ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_STOPPED);
++	else
++		return ((R_REG(&di->d32txregs->status) & XS_XS_MASK) == XS_XS_STOPPED);
++}
 +
-+	sb_commit(sbh);
-+	sb_setcoreidx(sbh, origidx);
-+	INTR_RESTORE(si, intr_val);
-+	return ret;
++bool
++dma_rxstopped(dma_info_t *di)
++{
++	if (DMA64_ENAB(di))
++		return ((R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK) == D64_RS0_RS_STOPPED);
++	else
++		return ((R_REG(&di->d32rxregs->status) & RS_RS_MASK) == RS_RS_STOPPED);
 +}
 +
 +void
-+sb_core_disable(sb_t *sbh, uint32 bits)
++dma_fifoloopbackenable(dma_info_t *di)
 +{
-+	sb_info_t *si;
-+	volatile uint32 dummy;
-+	uint32 rej;
-+	sbconfig_t *sb;
-+
-+	si = SB_INFO(sbh);
-+
-+	ASSERT(GOODREGS(si->curmap));
-+	sb = REGS2SB(si->curmap);
-+
-+	/* if core is already in reset, just return */
-+	if (R_SBREG(si, &sb->sbtmstatelow) & SBTML_RESET)
-+		return;
-+
-+	/* reject value changed between sonics 2.2 and 2.3 */
-+	if (si->sb.sonicsrev == SONICS_2_2)
-+		rej = (1 << SBTML_REJ_SHIFT);
++	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
++	if (DMA64_ENAB(di))
++		OR_REG(&di->d64txregs->control, D64_XC_LE);
 +	else
-+		rej = (2 << SBTML_REJ_SHIFT);
++		OR_REG(&di->d32txregs->control, XC_LE);
++}
 +
-+	/* if clocks are not enabled, put into reset and return */
-+	if ((R_SBREG(si, &sb->sbtmstatelow) & SBTML_CLK) == 0)
-+		goto disable;
++void
++dma_rxinit(dma_info_t *di)
++{
++	DMA_TRACE(("%s: dma_rxinit\n", di->name));
 +
-+	/* set target reject and spin until busy is clear (preserve core-specific bits) */
-+	OR_SBREG(si, &sb->sbtmstatelow, rej);
-+	dummy = R_SBREG(si, &sb->sbtmstatelow);
-+	OSL_DELAY(1);
-+	SPINWAIT((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
++	di->rxin = di->rxout = 0;
 +
-+ 	if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT) {
-+		OR_SBREG(si, &sb->sbimstate, SBIM_RJ);
-+		dummy = R_SBREG(si, &sb->sbimstate);
-+		OSL_DELAY(1);
-+		SPINWAIT((R_SBREG(si, &sb->sbimstate) & SBIM_BY), 100000);
++	/* clear rx descriptor ring */
++	if (DMA64_ENAB(di)) {
++		BZERO_SM((void*)di->rxd64, (di->nrxd * sizeof (dma64dd_t)));
++		dma_rxenable(di);
++		dma_ddtable_init(di, DMA_RX, di->rxdpa);
++	} else {
++		BZERO_SM((void*)di->rxd32, (di->nrxd * sizeof (dma32dd_t)));
++		dma_rxenable(di);
++		dma_ddtable_init(di, DMA_RX, di->rxdpa);
 +	}
++}
 +
-+	/* set reset and reject while enabling the clocks */
-+	W_SBREG(si, &sb->sbtmstatelow, (bits | SBTML_FGC | SBTML_CLK | rej | SBTML_RESET));
-+	dummy = R_SBREG(si, &sb->sbtmstatelow);
-+	OSL_DELAY(10);
++void
++dma_rxenable(dma_info_t *di)
++{
++	DMA_TRACE(("%s: dma_rxenable\n", di->name));
++	if (DMA64_ENAB(di))
++		W_REG(&di->d64rxregs->control, ((di->rxoffset << D64_RC_RO_SHIFT) | D64_RC_RE));
++	else
++		W_REG(&di->d32rxregs->control, ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
++}
 +
-+	/* don't forget to clear the initiator reject bit */
-+	if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT)
-+		AND_SBREG(si, &sb->sbimstate, ~SBIM_RJ);
++bool
++dma_rxenabled(dma_info_t *di)
++{
++	uint32 rc;
 +
-+disable:
-+	/* leave reset and reject asserted */
-+	W_SBREG(si, &sb->sbtmstatelow, (bits | rej | SBTML_RESET));
-+	OSL_DELAY(1);
++	if (DMA64_ENAB(di)) { 
++		rc = R_REG(&di->d64rxregs->control);
++		return ((rc != 0xffffffff) && (rc & D64_RC_RE));
++	} else {
++		rc = R_REG(&di->d32rxregs->control);
++		return ((rc != 0xffffffff) && (rc & RC_RE));
++	}
 +}
 +
-+/* set chip watchdog reset timer to fire in 'ticks' backplane cycles */
-+void
-+sb_watchdog(sb_t *sbh, uint ticks)
-+{
-+	sb_info_t *si = SB_INFO(sbh);
 +
-+	/* instant NMI */
-+	switch (si->gpioid) {
-+	case SB_CC:
-+		sb_corereg(si, 0, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
-+		break;
-+	case SB_EXTIF:
-+		sb_corereg(si, si->gpioidx, OFFSETOF(extifregs_t, watchdog), ~0, ticks);
-+		break;
++/* !! tx entry routine */
++int
++dma_txfast(dma_info_t *di, void *p0, uint32 coreflags)
++{
++	if (DMA64_ENAB(di)) { 
++		return dma64_txfast(di, p0, coreflags);
++	} else {
++		return dma32_txfast(di, p0, coreflags);
 +	}
 +}
 +
-+/* initialize the pcmcia core */
-+void
-+sb_pcmcia_init(sb_t *sbh)
++/* !! rx entry routine, returns a pointer to the next frame received, or NULL if there are no more */
++void*
++dma_rx(dma_info_t *di)
 +{
-+	sb_info_t *si;
-+	uint8 cor;
++	void *p;
++	uint len;
++	int skiplen = 0;
 +
-+	si = SB_INFO(sbh);
++	while ((p = dma_getnextrxp(di, FALSE))) {
++		/* skip giant packets which span multiple rx descriptors */
++		if (skiplen > 0) {
++			skiplen -= di->rxbufsize;
++			if (skiplen < 0)
++				skiplen = 0;
++			PKTFREE(di->osh, p, FALSE);
++			continue;
++		}
 +
-+	/* enable d11 mac interrupts */
-+	if (si->sb.chip == BCM4301_DEVICE_ID) {
-+		/* Have to use FCR2 in 4301 */
-+		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_FCR2 + PCMCIA_COR, &cor, 1);
-+		cor |= COR_IRQEN | COR_FUNEN;
-+		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_FCR2 + PCMCIA_COR, &cor, 1);
-+	} else {
-+		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
-+		cor |= COR_IRQEN | COR_FUNEN;
-+		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
++		len = ltoh16(*(uint16*)(PKTDATA(di->osh, p)));
++		DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
++
++		/* bad frame length check */
++		if (len > (di->rxbufsize - di->rxoffset)) {
++			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
++			if (len > 0)
++				skiplen = len - (di->rxbufsize - di->rxoffset);
++			PKTFREE(di->osh, p, FALSE);
++			di->hnddma.rxgiants++;
++			continue;
++		}
++
++		/* set actual length */
++		PKTSETLEN(di->osh, p, (di->rxoffset + len));
++
++		break;
 +	}
 +
++	return (p);
 +}
 +
-+
-+/*
-+ * Configure the pci core for pci client (NIC) action
-+ * coremask is the bitvec of cores by index to be enabled.
-+ */
++/* post receive buffers */
 +void
-+sb_pci_setup(sb_t *sbh, uint coremask)
++dma_rxfill(dma_info_t *di)
 +{
-+	sb_info_t *si;
-+	sbconfig_t *sb;
-+	sbpciregs_t *pciregs;
-+	uint32 sbflag;
-+	uint32 w;
-+	uint idx;
-+	int reg_val;
++	void *p;
++	uint rxin, rxout;
++	uint32 ctrl;
++	uint n;
++	uint i;
++	uint32 pa;
++	uint rxbufsize;
 +
-+	si = SB_INFO(sbh);
++	/*
++	 * Determine how many receive buffers we're lacking
++	 * from the full complement, allocate, initialize,
++	 * and post them, then update the chip rx lastdscr.
++	 */
 +
-+	/* if not pci bus, we're done */
-+	if (BUSTYPE(si->sb.bustype) != PCI_BUS)
-+		return;
++	rxin = di->rxin;
++	rxout = di->rxout;
++	rxbufsize = di->rxbufsize;
 +
-+	ASSERT(PCI(si) || PCIE(si));
-+	ASSERT(si->sb.buscoreidx != BADIDX);
++	n = di->nrxpost - NRXDACTIVE(rxin, rxout);
 +
-+	/* get current core index */
-+	idx = si->curidx;
++	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
 +
-+	/* we interrupt on this backplane flag number */
-+	ASSERT(GOODREGS(si->curmap));
-+	sb = REGS2SB(si->curmap);
-+	sbflag = R_SBREG(si, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
++	for (i = 0; i < n; i++) {
++		if ((p = PKTGET(di->osh, rxbufsize, FALSE)) == NULL) {
++			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
++			di->hnddma.rxnobuf++;
++			break;
++		}
 +
-+	/* switch over to pci core */
-+	pciregs = (sbpciregs_t*) sb_setcoreidx(sbh, si->sb.buscoreidx);
-+	sb = REGS2SB(pciregs);
++		/* Do a cached write instead of uncached write since DMA_MAP
++		 * will flush the cache. */
++		*(uint32*)(PKTDATA(di->osh, p)) = 0;
 +
-+	/*
-+	 * Enable sb->pci interrupts.  Assume
-+	 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
-+	 */
-+	if (PCIE(si) || (PCI(si) && ((si->sb.buscorerev) >= 6))) {
-+		/* pci config write to set this core bit in PCIIntMask */
-+		w = OSL_PCI_READ_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32));
-+		w |= (coremask << PCI_SBIM_SHIFT);
-+		OSL_PCI_WRITE_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32), w);
-+	} else {
-+		/* set sbintvec bit for our flag number */
-+		OR_SBREG(si, &sb->sbintvec, (1 << sbflag));
-+	}
++		pa = (uint32) DMA_MAP(di->osh, PKTDATA(di->osh, p), rxbufsize, DMA_RX, p);
++		ASSERT(ISALIGNED(pa, 4));
 +
-+	if (PCI(si)) {
-+		OR_REG(&pciregs->sbtopci2, (SBTOPCI_PREF|SBTOPCI_BURST));
-+		if (si->sb.buscorerev >= 11)
-+			OR_REG(&pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
-+		if (si->sb.buscorerev < 5) {
-+			SET_SBREG(si, &sb->sbimconfiglow, SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
-+				(0x3 << SBIMCL_RTO_SHIFT) | 0x2);
-+			sb_commit(sbh);
-+		}
-+	}
++		/* save the free packet pointer */
++		ASSERT(di->rxp[rxout] == NULL);
++		di->rxp[rxout] = p;
 +
-+	if (PCIE(si) && (si->sb.buscorerev == 0)) {
-+		reg_val = sb_pcie_readreg((void *)sbh, (void *)PCIE_PCIEREGS, PCIE_TLP_WORKAROUNDSREG);
-+		reg_val |= 0x8; 
-+		sb_pcie_writereg((void *)sbh, (void *)PCIE_PCIEREGS, PCIE_TLP_WORKAROUNDSREG, reg_val);
++		if (DMA64_ENAB(di)) {
++			/* prep the descriptor control1 flags (the byte count goes in ctrl2) */
++			ctrl = 0;
++			if (rxout == (di->nrxd - 1))
++				ctrl = D64_CTRL1_EOT;
 +
-+		reg_val = sb_pcie_readreg((void *)sbh, (void *)PCIE_PCIEREGS, PCIE_DLLP_LCREG);
-+		reg_val &= ~(0x40);
-+		sb_pcie_writereg(sbh, (void *)PCIE_PCIEREGS, PCIE_DLLP_LCREG, reg_val);
++			dma64_dd_upd(di, di->rxd64, pa, rxout, &ctrl, rxbufsize);
++		} else {
++			/* prep the descriptor control value */
++			ctrl = rxbufsize;
++			if (rxout == (di->nrxd - 1))
++				ctrl |= CTRL_EOT;
++			dma32_dd_upd(di, di->rxd32, pa, rxout, &ctrl);
++		}
 +
-+		BCMINIT(sb_war30841)(si);
++		rxout = NEXTRXD(rxout);
 +	}
 +
-+	/* switch back to previous core */
-+	sb_setcoreidx(sbh, idx);
++	di->rxout = rxout;
++
++	/* update the chip lastdscr pointer */
++	if (DMA64_ENAB(di)) {
++		W_REG(&di->d64rxregs->ptr, I2B(rxout, dma64dd_t));
++	} else {
++		W_REG(&di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
++	}
 +}
 +
-+uint32
-+sb_base(uint32 admatch)
++void
++dma_txreclaim(dma_info_t *di, bool forceall)
 +{
-+	uint32 base;
-+	uint type;
++	void *p;
 +
-+	type = admatch & SBAM_TYPE_MASK;
-+	ASSERT(type < 3);
++	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));
 +
-+	base = 0;
++	while ((p = dma_getnexttxp(di, forceall)))
++		PKTFREE(di->osh, p, TRUE);
++}
 +
-+	if (type == 0) {
-+		base = admatch & SBAM_BASE0_MASK;
-+	} else if (type == 1) {
-+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
-+		base = admatch & SBAM_BASE1_MASK;
-+	} else if (type == 2) {
-+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
-+		base = admatch & SBAM_BASE2_MASK;
++/*
++ * Reclaim next completed txd (txds if using chained buffers) and
++ * return associated packet.
++ * If 'forceall' is true, reclaim txd(s) and return associated packet
++ * regardless of the value of the hardware "curr" pointer.
++ */
++void*
++dma_getnexttxp(dma_info_t *di, bool forceall)
++{
++	if (DMA64_ENAB(di)) {
++		return dma64_getnexttxp(di, forceall);
++	} else {
++		return dma32_getnexttxp(di, forceall);
 +	}
-+
-+	return (base);
 +}
-+
-+uint32
-+sb_size(uint32 admatch)
++	
++/* like getnexttxp but no reclaim */
++void*
++dma_peeknexttxp(dma_info_t *di)
 +{
-+	uint32 size;
-+	uint type;
++	uint end, i;
 +
-+	type = admatch & SBAM_TYPE_MASK;
-+	ASSERT(type < 3);
++	if (DMA64_ENAB(di)) {
++		end = B2I(R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t);
++	} else {
++		end = B2I(R_REG(&di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
++	}
 +
-+	size = 0;
++	for (i = di->txin; i != end; i = NEXTTXD(i))
++		if (di->txp[i])
++			return (di->txp[i]);
 +
-+	if (type == 0) {
-+		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
-+	} else if (type == 1) {
-+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
-+		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
-+	} else if (type == 2) {
-+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
-+		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
-+	}
++	return (NULL);
++}
 +
-+	return (size);
++/*
++ * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
++ */
++void
++dma_txrotate(di_t *di)
++{
++	if (DMA64_ENAB(di)) {
++		dma64_txrotate(di);
++	} else {
++		dma32_txrotate(di);
++	}
 +}
 +
-+/* return the core-type instantiation # of the current core */
-+uint
-+sb_coreunit(sb_t *sbh)
++void
++dma_rxreclaim(dma_info_t *di)
 +{
-+	sb_info_t *si;
-+	uint idx;
-+	uint coreid;
-+	uint coreunit;
-+	uint i;
++	void *p;
 +
-+	si = SB_INFO(sbh);
-+	coreunit = 0;
++	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
 +
-+	idx = si->curidx;
++	while ((p = dma_getnextrxp(di, TRUE)))
++		PKTFREE(di->osh, p, FALSE);
++}
 +
-+	ASSERT(GOODREGS(si->curmap));
-+	coreid = sb_coreid(sbh);
++void *
++dma_getnextrxp(dma_info_t *di, bool forceall)
++{
++	if (DMA64_ENAB(di)) {
++		return dma64_getnextrxp(di, forceall);
++	} else {
++		return dma32_getnextrxp(di, forceall);
++	}
++}
 +
-+	/* count the cores of our type */
-+	for (i = 0; i < idx; i++)
-+		if (si->coreid[i] == coreid)
-+			coreunit++;
++uintptr
++dma_getvar(dma_info_t *di, char *name)
++{
++	if (!strcmp(name, "&txavail"))
++		return ((uintptr) &di->txavail);
++	else {
++		ASSERT(0);
++	}
++	return (0);
++}
 +
-+	return (coreunit);
++void
++dma_txblock(dma_info_t *di)
++{
++	di->txavail = 0;
 +}
 +
-+static INLINE uint32
-+factor6(uint32 x)
++void
++dma_txunblock(dma_info_t *di)
 +{
-+	switch (x) {
-+	case CC_F6_2:	return 2;
-+	case CC_F6_3:	return 3;
-+	case CC_F6_4:	return 4;
-+	case CC_F6_5:	return 5;
-+	case CC_F6_6:	return 6;
-+	case CC_F6_7:	return 7;
-+	default:	return 0;
-+	}
++	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
 +}
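The txavail computation above (repeated in the txfast and getnexttxp paths) keeps one descriptor permanently unused so that a completely full ring is never indistinguishable from an empty one. A small standalone check of that convention, assuming the usual masked-subtraction definition of NTXDACTIVE() (NDESC and the indices are invented):

/* "Keep one descriptor free" check; NDESC stands in for di->ntxd. */
#include <assert.h>

#define NDESC        16
#define NACTIVE(h,t) (((t) - (h)) & (NDESC - 1))

int main(void)
{
	unsigned txin = 5, txout = 5;                  /* empty ring */
	assert(NDESC - NACTIVE(txin, txout) - 1 == NDESC - 1);
	txout = (txin - 1) & (NDESC - 1);              /* "full": txout just behind txin */
	assert(NDESC - NACTIVE(txin, txout) - 1 == 0); /* txavail is 0, not NDESC */
	return 0;
}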
 +
-+/* calculate the speed the SB would run at given a set of clockcontrol values */
-+uint32
-+sb_clock_rate(uint32 pll_type, uint32 n, uint32 m)
++uint
++dma_txactive(dma_info_t *di)
 +{
-+	uint32 n1, n2, clock, m1, m2, m3, mc;
++	return (NTXDACTIVE(di->txin, di->txout));
++}
++	
++void
++dma_rxpiomode(dma32regs_t *regs)
++{
++	W_REG(&regs->control, RC_FM);
++}
 +
-+	n1 = n & CN_N1_MASK;
-+	n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
++void
++dma_txpioloopback(dma32regs_t *regs)
++{
++	OR_REG(&regs->control, XC_LE);
++}
 +
-+	if (pll_type == PLL_TYPE6) {
-+		if (m & CC_T6_MMASK)
-+			return CC_T6_M1;
-+		else
-+			return CC_T6_M0;
-+	} else if ((pll_type == PLL_TYPE1) ||
-+		   (pll_type == PLL_TYPE3) ||
-+		   (pll_type == PLL_TYPE4) ||
-+		   (pll_type == PLL_TYPE7)) {
-+		n1 = factor6(n1);
-+		n2 += CC_F5_BIAS;
-+	} else if (pll_type == PLL_TYPE2) {
-+		n1 += CC_T2_BIAS;
-+		n2 += CC_T2_BIAS;
-+		ASSERT((n1 >= 2) && (n1 <= 7));
-+		ASSERT((n2 >= 5) && (n2 <= 23));
-+	} else if (pll_type == PLL_TYPE5) {
-+		return (100000000);
-+	} else
-+		ASSERT(0);
-+	/* PLL types 3 and 7 use BASE2 (25Mhz) */
-+	if ((pll_type == PLL_TYPE3) ||
-+	    (pll_type == PLL_TYPE7)) { 
-+		clock =  CC_CLOCK_BASE2 * n1 * n2;
-+	}
-+	else 
-+		clock = CC_CLOCK_BASE1 * n1 * n2;
 +
-+	if (clock == 0)
-+		return 0;
 +
-+	m1 = m & CC_M1_MASK;
-+	m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
-+	m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
-+	mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
 +
-+	if ((pll_type == PLL_TYPE1) ||
-+	    (pll_type == PLL_TYPE3) ||
-+	    (pll_type == PLL_TYPE4) ||
-+	    (pll_type == PLL_TYPE7)) {
-+		m1 = factor6(m1);
-+		if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
-+			m2 += CC_F5_BIAS;
-+		else
-+			m2 = factor6(m2);
-+		m3 = factor6(m3);
++/*** 32 bits DMA non-inline functions ***/
++static bool
++dma32_alloc(dma_info_t *di, uint direction)
++{
++	uint size;
++	uint ddlen;
++	void *va;
 +
-+		switch (mc) {
-+		case CC_MC_BYPASS:	return (clock);
-+		case CC_MC_M1:		return (clock / m1);
-+		case CC_MC_M1M2:	return (clock / (m1 * m2));
-+		case CC_MC_M1M2M3:	return (clock / (m1 * m2 * m3));
-+		case CC_MC_M1M3:	return (clock / (m1 * m3));
-+		default:		return (0);
-+		}
-+	} else {
-+		ASSERT(pll_type == PLL_TYPE2);
++	ddlen = sizeof (dma32dd_t);
 +
-+		m1 += CC_T2_BIAS;
-+		m2 += CC_T2M2_BIAS;
-+		m3 += CC_T2_BIAS;
-+		ASSERT((m1 >= 2) && (m1 <= 7));
-+		ASSERT((m2 >= 3) && (m2 <= 10));
-+		ASSERT((m3 >= 2) && (m3 <= 7));
++	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
++
++	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, D32RINGALIGN))
++		size += D32RINGALIGN;
 +
-+		if ((mc & CC_T2MC_M1BYP) == 0)
-+			clock /= m1;
-+		if ((mc & CC_T2MC_M2BYP) == 0)
-+			clock /= m2;
-+		if ((mc & CC_T2MC_M3BYP) == 0)
-+			clock /= m3;
 +
-+		return(clock);
++	if (direction == DMA_TX) {
++		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa)) == NULL) {
++			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
++			return FALSE;
++		}
++
++		di->txd32 = (dma32dd_t*) ROUNDUP((uintptr)va, D32RINGALIGN);
++		di->txdalign = (uint)((int8*)di->txd32 - (int8*)va);
++		di->txdpa += di->txdalign;
++		di->txdalloc = size;
++		ASSERT(ISALIGNED((uintptr)di->txd32, D32RINGALIGN));
++	} else {
++		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa)) == NULL) {
++			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
++			return FALSE;
++		}
++		di->rxd32 = (dma32dd_t*) ROUNDUP((uintptr)va, D32RINGALIGN);
++		di->rxdalign = (uint)((int8*)di->rxd32 - (int8*)va);
++		di->rxdpa += di->rxdalign;
++		di->rxdalloc = size;
++		ASSERT(ISALIGNED((uintptr)di->rxd32, D32RINGALIGN));
 +	}
++
++	return TRUE;
 +}
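dma32_alloc() over-allocates by D32RINGALIGN when the allocator's natural alignment is not sufficient, rounds the returned virtual address up, and applies the same byte offset to the physical address. A standalone sketch of that fixup arithmetic (the addresses and this ROUNDUP definition are illustrative, not the driver's):

/* Ring-alignment fixup after a consistent-memory allocation. */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_SZ     4096u
#define ROUNDUP(x,a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t va = 0x10001040;                 /* hypothetical virtual address */
	uintptr_t pa = 0x00401040;                 /* matching physical address */
	uintptr_t aligned = ROUNDUP(va, ALIGN_SZ);
	uintptr_t off = aligned - va;              /* plays the role of di->txdalign */
	printf("aligned va=%#lx pa=%#lx off=%lu\n",
	       (unsigned long)aligned, (unsigned long)(pa + off), (unsigned long)off);
	return 0;
}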
 +
-+/* returns the current speed the SB is running at */
-+uint32
-+sb_clock(sb_t *sbh)
++static void 
++dma32_txreset(dma_info_t *di)
 +{
-+	sb_info_t *si;
-+	extifregs_t *eir;
-+	chipcregs_t *cc;
-+	uint32 n, m;
-+	uint idx;
-+	uint32 pll_type, rate;
-+	uint intr_val = 0;
++	uint32 status;
 +
-+	si = SB_INFO(sbh);
-+	idx = si->curidx;
-+	pll_type = PLL_TYPE1;
++	/* suspend tx DMA first */
++	W_REG(&di->d32txregs->control, XC_SE);
++	SPINWAIT((status = (R_REG(&di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED &&
++		 status != XS_XS_IDLE &&
++		 status != XS_XS_STOPPED,
++		 10000);
 +
-+	INTR_OFF(si, intr_val);
++	W_REG(&di->d32txregs->control, 0);
++	SPINWAIT((status = (R_REG(&di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED,
++		 10000);
 +
-+	/* switch to extif or chipc core */
-+	if ((eir = (extifregs_t *) sb_setcore(sbh, SB_EXTIF, 0))) {
-+		n = R_REG(&eir->clockcontrol_n);
-+		m = R_REG(&eir->clockcontrol_sb);
-+	} else if ((cc = (chipcregs_t *) sb_setcore(sbh, SB_CC, 0))) {
-+		pll_type = R_REG(&cc->capabilities) & CAP_PLL_MASK;
-+		n = R_REG(&cc->clockcontrol_n);
-+		if (pll_type == PLL_TYPE6)
-+			m = R_REG(&cc->clockcontrol_mips);
-+		else if (pll_type == PLL_TYPE3)
-+		{
-+			// Added by Chen-I for 5365 
-+			if (BCMINIT(sb_chip)(sbh) == BCM5365_DEVICE_ID) 	
-+				m = R_REG(&cc->clockcontrol_sb);
-+			else
-+				m = R_REG(&cc->clockcontrol_m2);
-+		}
-+		else
-+			m = R_REG(&cc->clockcontrol_sb);
-+	} else {
-+		INTR_RESTORE(si, intr_val);
-+		return 0;
++	if (status != XS_XS_DISABLED) {
++		DMA_ERROR(("%s: dma_txreset: dma cannot be stopped\n", di->name));
 +	}
 +
-+	// Added by Chen-I for 5365 
-+	if (BCMINIT(sb_chip)(sbh) == BCM5365_DEVICE_ID)
-+	{
-+		rate = 100000000;
-+	}
-+	else
-+	{	
-+		/* calculate rate */
-+		rate = sb_clock_rate(pll_type, n, m);
-+		if (pll_type == PLL_TYPE3)
-+			rate = rate / 2;
-+	}
++	/* wait for the last transaction to complete */
++	OSL_DELAY(300);
++}
 +
-+	/* switch back to previous core */
-+	sb_setcoreidx(sbh, idx);
++static void 
++dma32_rxreset(dma_info_t *di)
++{
++	uint32 status;
 +
-+	INTR_RESTORE(si, intr_val);
++	W_REG(&di->d32rxregs->control, 0);
++	SPINWAIT((status = (R_REG(&di->d32rxregs->status) & RS_RS_MASK)) != RS_RS_DISABLED,
++		 10000);
 +
-+	return rate;
++	if (status != RS_RS_DISABLED) {
++		DMA_ERROR(("%s: dma_rxreset: dma cannot be stopped\n", di->name));
++	}
 +}
 +
-+/* change logical "focus" to the gpio core for optimized access */
-+void*
-+sb_gpiosetcore(sb_t *sbh)
++static bool
++dma32_txsuspendedidle(dma_info_t *di)
 +{
-+	sb_info_t *si;
-+
-+	si = SB_INFO(sbh);
++	if (!(R_REG(&di->d32txregs->control) & XC_SE))
++		return 0;
++	
++	if ((R_REG(&di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
++		return 0;
 +
-+	return (sb_setcoreidx(sbh, si->gpioidx));
++	OSL_DELAY(2);
++	return ((R_REG(&di->d32txregs->status) & XS_XS_MASK) == XS_XS_IDLE);
 +}
 +
-+/* mask&set gpiocontrol bits */
-+uint32
-+sb_gpiocontrol(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
++/*
++ * supports full 32bit dma engine buffer addressing so
++ * dma buffers can cross 4 Kbyte page boundaries.
++ */
++static int
++dma32_txfast(dma_info_t *di, void *p0, uint32 coreflags)
 +{
-+	sb_info_t *si;
-+	uint regoff;
++	void *p, *next;
++	uchar *data;
++	uint len;
++	uint txout;
++	uint32 ctrl;
++	uint32 pa;	
 +
-+	si = SB_INFO(sbh);
-+	regoff = 0;
++	DMA_TRACE(("%s: dma_txfast\n", di->name));
 +
-+	priority = GPIO_DRV_PRIORITY; /* compatibility hack */
++	txout = di->txout;
++	ctrl = 0;
 +
-+	/* gpios could be shared on router platforms */
-+	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
-+		mask = priority ? (sb_gpioreservation & mask) :
-+			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
-+		val &= mask;
-+	}
++	/*
++	 * Walk the chain of packet buffers
++	 * allocating and initializing transmit descriptor entries.
++	 */
++	for (p = p0; p; p = next) {
++		data = PKTDATA(di->osh, p);
++		len = PKTLEN(di->osh, p);
++		next = PKTNEXT(di->osh, p);
 +
-+	switch (si->gpioid) {
-+	case SB_CC:
-+		regoff = OFFSETOF(chipcregs_t, gpiocontrol);
-+		break;
++		/* return nonzero if out of tx descriptors */
++		if (NEXTTXD(txout) == di->txin)
++			goto outoftxd;
 +
-+	case SB_PCI:
-+		regoff = OFFSETOF(sbpciregs_t, gpiocontrol);
-+		break;
++		if (len == 0)
++			continue;
 +
-+	case SB_EXTIF:
-+		return (0);
-+	}
++		/* get physical address of buffer start */
++		pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);
 +
-+	return (sb_corereg(si, si->gpioidx, regoff, mask, val));
-+}
++		/* build the descriptor control value */
++		ctrl = len & CTRL_BC_MASK;
 +
-+/* mask&set gpio output enable bits */
-+uint32
-+sb_gpioouten(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
-+{
-+	sb_info_t *si;
-+	uint regoff;
++		ctrl |= coreflags;
++		
++		if (p == p0)
++			ctrl |= CTRL_SOF;
++		if (next == NULL)
++			ctrl |= (CTRL_IOC | CTRL_EOF);
++		if (txout == (di->ntxd - 1))
++			ctrl |= CTRL_EOT;
 +
-+	si = SB_INFO(sbh);
-+	regoff = 0;
++		if (DMA64_ENAB(di)) {
++			dma64_dd_upd(di, di->txd64, pa, txout, &ctrl, len);
++		} else {
++			dma32_dd_upd(di, di->txd32, pa, txout, &ctrl);
++		}
 +
-+	priority = GPIO_DRV_PRIORITY; /* compatibility hack */
++		ASSERT(di->txp[txout] == NULL);
 +
-+	/* gpios could be shared on router platforms */
-+	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
-+		mask = priority ? (sb_gpioreservation & mask) :
-+			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
-+		val &= mask;
++		txout = NEXTTXD(txout);
 +	}
 +
-+	switch (si->gpioid) {
-+	case SB_CC:
-+		regoff = OFFSETOF(chipcregs_t, gpioouten);
-+		break;
++	/* if last txd eof not set, fix it */
++	if (!(ctrl & CTRL_EOF))
++		W_SM(&di->txd32[PREVTXD(txout)].ctrl, BUS_SWAP32(ctrl | CTRL_IOC | CTRL_EOF));
 +
-+	case SB_PCI:
-+		regoff = OFFSETOF(sbpciregs_t, gpioouten);
-+		break;
++	/* save the packet */
++	di->txp[PREVTXD(txout)] = p0;
 +
-+	case SB_EXTIF:
-+		regoff = OFFSETOF(extifregs_t, gpio[0].outen);
-+		break;
++	/* bump the tx descriptor index */
++	di->txout = txout;
++
++	/* kick the chip */
++	if (DMA64_ENAB(di)) {
++		W_REG(&di->d64txregs->ptr, I2B(txout, dma64dd_t));
++	} else {
++		W_REG(&di->d32txregs->ptr, I2B(txout, dma32dd_t));
 +	}
 +
-+	return (sb_corereg(si, si->gpioidx, regoff, mask, val));
++	/* tx flow control */
++	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
++
++	return (0);
++
++ outoftxd:
++	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
++	PKTFREE(di->osh, p0, TRUE);
++	di->txavail = 0;
++	di->hnddma.txnobuf++;
++	return (-1);
 +}
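For a single-buffer frame the loop above ends up setting SOF, EOF, IOC and the byte count in one control word, plus EOT when the frame lands in the last ring slot. A standalone sketch using the CTRL_* values defined in sbhnddma.h later in this patch (the length and ring size are made up):

/* Assembling a 32-bit descriptor control word for a one-fragment frame. */
#include <stdio.h>

#define CTRL_BC_MASK 0x1fff
#define CTRL_EOT     (1u << 28)
#define CTRL_IOC     (1u << 29)
#define CTRL_EOF     (1u << 30)
#define CTRL_SOF     (1u << 31)

int main(void)
{
	unsigned len = 1514, txout = 15, ntxd = 16;
	unsigned ctrl = len & CTRL_BC_MASK;
	ctrl |= CTRL_SOF;                  /* first buffer of the frame */
	ctrl |= CTRL_IOC | CTRL_EOF;       /* also the last buffer */
	if (txout == ntxd - 1)
		ctrl |= CTRL_EOT;          /* last slot in the ring */
	printf("ctrl=%#x\n", ctrl);
	return 0;
}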
 +
-+/* mask&set gpio output bits */
-+uint32
-+sb_gpioout(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
++static void*
++dma32_getnexttxp(dma_info_t *di, bool forceall)
 +{
-+	sb_info_t *si;
-+	uint regoff;
++	uint start, end, i;
++	void *txp;
++
++	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));
++
++	txp = NULL;
++
++	start = di->txin;
++	if (forceall)
++		end = di->txout;
++	else
++		end = B2I(R_REG(&di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
 +
-+	si = SB_INFO(sbh);
-+	regoff = 0;
++	if ((start == 0) && (end > di->txout))
++		goto bogus;
 +
-+	priority = GPIO_DRV_PRIORITY; /* compatibility hack */
++	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
++		DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow),
++			  (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK), DMA_TX, di->txp[i]);
 +
-+	/* gpios could be shared on router platforms */
-+	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
-+		mask = priority ? (sb_gpioreservation & mask) :
-+			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
-+		val &= mask;
++		W_SM(&di->txd32[i].addr, 0xdeadbeef);
++		txp = di->txp[i];
++		di->txp[i] = NULL;
 +	}
 +
-+	switch (si->gpioid) {
-+	case SB_CC:
-+		regoff = OFFSETOF(chipcregs_t, gpioout);
-+		break;
++	di->txin = i;
 +
-+	case SB_PCI:
-+		regoff = OFFSETOF(sbpciregs_t, gpioout);
-+		break;
++	/* tx flow control */
++	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
 +
-+	case SB_EXTIF:
-+		regoff = OFFSETOF(extifregs_t, gpio[0].out);
-+		break;
-+	}
++	return (txp);
 +
-+	return (sb_corereg(si, si->gpioidx, regoff, mask, val));
++bogus:
++/*
++	DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
++		start, end, di->txout, forceall));
++*/
++	return (NULL);
 +}
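Both reclaim paths convert between descriptor indices and the byte offsets the hardware reports, via B2I()/I2B(). Those macros are defined elsewhere in the driver; the sketch below assumes they are plain divide/multiply by the descriptor size (dd32_t and the sample offset are invented stand-ins):

/* Assumed byte<->index conversion behind B2I()/I2B(). */
#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t ctrl, addr; } dd32_t;   /* mirrors dma32dd_t */

#define I2B(i, t) ((i) * sizeof(t))
#define B2I(b, t) ((b) / sizeof(t))

int main(void)
{
	unsigned cd_bytes = 0x58;                 /* value read from the CD field */
	printf("index=%zu back=%zu\n",
	       B2I(cd_bytes, dd32_t), I2B(B2I(cd_bytes, dd32_t), dd32_t));
	return 0;
}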
 +
-+/* reserve one gpio */
-+uint32
-+sb_gpioreserve(sb_t *sbh, uint32 gpio_bitmask, uint8 priority)
++static void *
++dma32_getnextrxp(dma_info_t *di, bool forceall)
 +{
-+	sb_info_t *si;
++	uint i;
++	void *rxp;
 +
-+	si = SB_INFO(sbh);
++	/* if forcing, dma engine must be disabled */
++	ASSERT(!forceall || !dma_rxenabled(di));
 +
-+	priority = GPIO_DRV_PRIORITY; /* compatibility hack */
++	i = di->rxin;
 +
-+	/* only cores on SB_BUS share GPIO's and only applcation users need to reserve/release GPIO */
-+	if ( (BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority))  {
-+		ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
-+		return -1;
-+	}
-+	/* make sure only one bit is set */
-+	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
-+		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
-+		return -1;
-+	}
++	/* return if no packets posted */
++	if (i == di->rxout)
++		return (NULL);
 +
-+	/* already reserved */
-+	if (sb_gpioreservation & gpio_bitmask)
-+		return -1;
-+	/* set reservation */
-+	sb_gpioreservation |= gpio_bitmask;
++	/* ignore curr if forceall */
++	if (!forceall && (i == B2I(R_REG(&di->d32rxregs->status) & RS_CD_MASK, dma32dd_t)))
++		return (NULL);
 +
-+	return sb_gpioreservation;
-+}
++	/* get the packet pointer that corresponds to the rx descriptor */
++	rxp = di->rxp[i];
++	ASSERT(rxp);
++	di->rxp[i] = NULL;
 +
-+/* release one gpio */
-+/* 
-+ * releasing the gpio doesn't change the current value on the GPIO last write value 
-+ * persists till some one overwrites it
-+*/
++	/* clear this packet from the descriptor ring */
++	DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow),
++		  di->rxbufsize, DMA_RX, rxp);
++	W_SM(&di->rxd32[i].addr, 0xdeadbeef);
 +
-+uint32
-+sb_gpiorelease(sb_t *sbh, uint32 gpio_bitmask, uint8 priority)
-+{
-+	sb_info_t *si;
++	di->rxin = NEXTRXD(i);
 +
-+	si = SB_INFO(sbh);
++	return (rxp);
++}
 +
-+	priority = GPIO_DRV_PRIORITY; /* compatibility hack */
++static void
++dma32_txrotate(di_t *di)
++{
++	uint ad;
++	uint nactive;
++	uint rot;
++	uint old, new;
++	uint32 w;
++	uint first, last;
 +
-+	/* only cores on SB_BUS share GPIO's and only applcation users need to reserve/release GPIO */
-+	if ( (BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority))  {
-+		ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
-+		return -1;
-+	}
-+	/* make sure only one bit is set */
-+	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
-+		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
-+		return -1;
-+	}
-+	
-+	/* already released */
-+	if (!(sb_gpioreservation & gpio_bitmask))
-+		return -1;
++	ASSERT(dma_txsuspendedidle(di));
 +
-+	/* clear reservation */
-+	sb_gpioreservation &= ~gpio_bitmask;
++	nactive = dma_txactive(di);
++	ad = B2I(((R_REG(&di->d32txregs->status) & XS_AD_MASK) >> XS_AD_SHIFT), dma32dd_t);
++	rot = TXD(ad - di->txin);
 +
-+	return sb_gpioreservation;
-+}
++	ASSERT(rot < di->ntxd);
 +
-+/* return the current gpioin register value */
-+uint32
-+sb_gpioin(sb_t *sbh)
-+{
-+	sb_info_t *si;
-+	uint regoff;
++	/* full-ring case is a lot harder - don't worry about this */
++	if (rot >= (di->ntxd - nactive)) {
++		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
++		return;
++	}
 +
-+	si = SB_INFO(sbh);
-+	regoff = 0;
++	first = di->txin;
++	last = PREVTXD(di->txout);
 +
-+	switch (si->gpioid) {
-+	case SB_CC:
-+		regoff = OFFSETOF(chipcregs_t, gpioin);
-+		break;
++	/* move entries starting at last and moving backwards to first */
++	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
++		new = TXD(old + rot);
 +
-+	case SB_PCI:
-+		regoff = OFFSETOF(sbpciregs_t, gpioin);
-+		break;
++		/*
++		 * Move the tx dma descriptor.
++		 * EOT is set only in the last entry in the ring.
++		 */
++		w = R_SM(&di->txd32[old].ctrl) & ~CTRL_EOT;
++		if (new == (di->ntxd - 1))
++			w |= CTRL_EOT;
++		W_SM(&di->txd32[new].ctrl, w);
++		W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));
 +
-+	case SB_EXTIF:
-+		regoff = OFFSETOF(extifregs_t, gpioin);
-+		break;
++		/* zap the old tx dma descriptor address field */
++		W_SM(&di->txd32[old].addr, 0xdeadbeef);
++
++		/* move the corresponding txp[] entry */
++		ASSERT(di->txp[new] == NULL);
++		di->txp[new] = di->txp[old];
++		di->txp[old] = NULL;
 +	}
 +
-+	return (sb_corereg(si, si->gpioidx, regoff, 0, 0));
++	/* update txin and txout */
++	di->txin = ad;
++	di->txout = TXD(di->txout + rot);
++	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
++
++	/* kick the chip */
++	W_REG(&di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
 +}
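dma32_txrotate() shifts every still-posted descriptor forward by rot = (active descriptor - txin) mod ntxd so the ring lines up with wherever the hardware restarted. A worked example of that index remapping with a small made-up ring:

/* Index remapping performed by the tx rotate path (values invented). */
#include <stdio.h>

#define NDESC   8
#define TXD(i)  ((i) & (NDESC - 1))

int main(void)
{
	unsigned txin = 6, txout = 1, ad = 1;   /* slots 6, 7, 0 still posted */
	unsigned rot = TXD(ad - txin);          /* 3 */
	for (unsigned old = txin; old != txout; old = TXD(old + 1))
		printf("slot %u -> %u\n", old, TXD(old + rot));
	return 0;
}

The driver walks the live slots backwards, from the newest entry down to txin, so it never overwrites a descriptor it has not moved yet.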
 +
-+/* mask&set gpio interrupt polarity bits */
-+uint32
-+sb_gpiointpolarity(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
-+{
-+	sb_info_t *si;
-+	uint regoff;
++/*** 64 bits DMA non-inline functions ***/
 +
-+	si = SB_INFO(sbh);
-+	regoff = 0;
++#ifdef BCMDMA64
 +
-+	priority = GPIO_DRV_PRIORITY; /* compatibility hack */
++static bool
++dma64_alloc(dma_info_t *di, uint direction)
++{
++	uint size;
++	uint ddlen;
++	uint32 alignbytes;
++	void *va;
 +
-+	/* gpios could be shared on router platforms */
-+	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
-+		mask = priority ? (sb_gpioreservation & mask) :
-+			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
-+		val &= mask;
-+	}
++	ddlen = sizeof (dma64dd_t);
 +
-+	switch (si->gpioid) {
-+	case SB_CC:
-+		regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
-+		break;
++	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
 +
-+	case SB_PCI:
-+		/* pci gpio implementation does not support interrupt polarity */
-+		ASSERT(0);
-+		break;
++	alignbytes = di->dma64align;
 +
-+	case SB_EXTIF:
-+		regoff = OFFSETOF(extifregs_t, gpiointpolarity);
-+		break;
-+	}
++	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, alignbytes))
++		size += alignbytes;
 +
-+	return (sb_corereg(si, si->gpioidx, regoff, mask, val));
-+}
 +
-+/* mask&set gpio interrupt mask bits */
-+uint32
-+sb_gpiointmask(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
-+{
-+	sb_info_t *si;
-+	uint regoff;
++	if (direction == DMA_TX) {
++		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa)) == NULL) {
++			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
++			return FALSE;
++		}
 +
-+	si = SB_INFO(sbh);
-+	regoff = 0;
++		di->txd64 = (dma64dd_t*) ROUNDUP((uintptr)va, alignbytes);
++		di->txdalign = (uint)((int8*)di->txd64 - (int8*)va);
++		di->txdpa += di->txdalign;
++		di->txdalloc = size;
++		ASSERT(ISALIGNED((uintptr)di->txd64, alignbytes));
++	} else {
++		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa)) == NULL) {
++			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
++			return FALSE;
++		}
++		di->rxd64 = (dma64dd_t*) ROUNDUP((uintptr)va, alignbytes);
++		di->rxdalign = (uint)((int8*)di->rxd64 - (int8*)va);
++		di->rxdpa += di->rxdalign;
++		di->rxdalloc = size;
++		ASSERT(ISALIGNED((uintptr)di->rxd64, alignbytes));
++	}
 +
-+	priority = GPIO_DRV_PRIORITY; /* compatibility hack */
++	return TRUE;
++}
 +
-+	/* gpios could be shared on router platforms */
-+	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
-+		mask = priority ? (sb_gpioreservation & mask) :
-+			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
-+		val &= mask;
-+	}
++static void 
++dma64_txreset(dma_info_t *di)
++{
++	uint32 status;
 +
-+	switch (si->gpioid) {
-+	case SB_CC:
-+		regoff = OFFSETOF(chipcregs_t, gpiointmask);
-+		break;
++	/* suspend tx DMA first */
++	W_REG(&di->d64txregs->control, D64_XC_SE);
++	SPINWAIT((status = (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED &&
++		 status != D64_XS0_XS_IDLE &&
++		 status != D64_XS0_XS_STOPPED,
++		 10000);
 +
-+	case SB_PCI:
-+		/* pci gpio implementation does not support interrupt mask */
-+		ASSERT(0);
-+		break;
++	W_REG(&di->d64txregs->control, 0);
++	SPINWAIT((status = (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED,
++		 10000);
 +
-+	case SB_EXTIF:
-+		regoff = OFFSETOF(extifregs_t, gpiointmask);
-+		break;
++	if (status != D64_XS0_XS_DISABLED) {
++		DMA_ERROR(("%s: dma_txreset: dma cannot be stopped\n", di->name));
 +	}
 +
-+	return (sb_corereg(si, si->gpioidx, regoff, mask, val));
++	/* wait for the last transaction to complete */
++	OSL_DELAY(300);
 +}
 +
-+/* assign the gpio to an led */
-+uint32
-+sb_gpioled(sb_t *sbh, uint32 mask, uint32 val)
++static void 
++dma64_rxreset(dma_info_t *di)
 +{
-+	sb_info_t *si;
++	uint32 status;
 +
-+	si = SB_INFO(sbh);
-+	if (si->sb.ccrev < 16)
-+		return -1;
++	W_REG(&di->d64rxregs->control, 0);
++	SPINWAIT((status = (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED,
++		 10000);
 +
-+	/* gpio led powersave reg */
-+	return(sb_corereg(si, 0, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
++	if (status != D64_RS0_RS_DISABLED) {
++		DMA_ERROR(("%s: dma_rxreset: dma cannot be stopped\n", di->name));
++	}
 +}
 +
-+/* mask&set gpio timer val */
-+uint32 
-+sb_gpiotimerval(sb_t *sbh, uint32 mask, uint32 gpiotimerval)
++static bool
++dma64_txsuspendedidle(dma_info_t *di)
 +{
-+	sb_info_t *si;
-+	si = SB_INFO(sbh);
 +
-+	if (si->sb.ccrev < 16)
-+		return -1;
++	if (!(R_REG(&di->d64txregs->control) & D64_XC_SE))
++		return 0;
++	
++	if ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_IDLE)
++		return 1;
 +
-+	return(sb_corereg(si, 0, OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
++	return 0;
 +}
 +
-+
-+/* return the slow clock source - LPO, XTAL, or PCI */
-+static uint
-+sb_slowclk_src(sb_info_t *si)
++/*
++ * supports full 64 bit dma engine buffer addressing so
++ * dma buffers can cross 4 Kbyte page boundaries.
++ */
++static int
++dma64_txfast(dma_info_t *di, void *p0, uint32 coreflags)
 +{
-+	chipcregs_t *cc;
++	void *p, *next;
++	uchar *data;
++	uint len;
++	uint txout;
++	uint32 flags;
++	uint32 pa;	
 +
++	DMA_TRACE(("%s: dma_txfast\n", di->name));
 +
-+	ASSERT(sb_coreid(&si->sb) == SB_CC);
++	txout = di->txout;
++	flags = 0;
 +
-+	if (si->sb.ccrev < 6) {
-+		if ((BUSTYPE(si->sb.bustype) == PCI_BUS)
-+			&& (OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32)) & PCI_CFG_GPIO_SCS))
-+			return (SCC_SS_PCI);
-+		else
-+			return (SCC_SS_XTAL);
-+	} else if (si->sb.ccrev < 10) {
-+		cc = (chipcregs_t*) sb_setcoreidx(&si->sb, si->curidx);
-+		return (R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK);
-+	} else	/* Insta-clock */
-+		return (SCC_SS_XTAL);
-+}
++	/*
++	 * Walk the chain of packet buffers
++	 * allocating and initializing transmit descriptor entries.
++	 */
++	for (p = p0; p; p = next) {
++		data = PKTDATA(di->osh, p);
++		len = PKTLEN(di->osh, p);
++		next = PKTNEXT(di->osh, p);
 +
-+/* return the ILP (slowclock) min or max frequency */
-+static uint
-+sb_slowclk_freq(sb_info_t *si, bool max)
-+{
-+	chipcregs_t *cc;
-+	uint32 slowclk;
-+	uint div;
++		/* return nonzero if out of tx descriptors */
++		if (NEXTTXD(txout) == di->txin)
++			goto outoftxd;
 +
++		if (len == 0)
++			continue;
 +
-+	ASSERT(sb_coreid(&si->sb) == SB_CC);
++		/* get physical address of buffer start */
++		pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);
 +
-+	cc = (chipcregs_t*) sb_setcoreidx(&si->sb, si->curidx);
++		flags = coreflags;
++		
++		if (p == p0)
++			flags |= D64_CTRL1_SOF;
++		if (next == NULL)
++			flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
++		if (txout == (di->ntxd - 1))
++			flags |= D64_CTRL1_EOT;
 +
-+	/* shouldn't be here unless we've established the chip has dynamic clk control */
-+	ASSERT(R_REG(&cc->capabilities) & CAP_PWR_CTL);
++		dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
 +
-+	slowclk = sb_slowclk_src(si);
-+	if (si->sb.ccrev < 6) {
-+		if (slowclk == SCC_SS_PCI)
-+			return (max? (PCIMAXFREQ/64) : (PCIMINFREQ/64));
-+		else
-+			return (max? (XTALMAXFREQ/32) : (XTALMINFREQ/32));
-+	} else if (si->sb.ccrev < 10) {
-+		div = 4 * (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
-+		if (slowclk == SCC_SS_LPO)
-+			return (max? LPOMAXFREQ : LPOMINFREQ);
-+		else if (slowclk == SCC_SS_XTAL)
-+			return (max? (XTALMAXFREQ/div) : (XTALMINFREQ/div));
-+		else if (slowclk == SCC_SS_PCI)
-+			return (max? (PCIMAXFREQ/div) : (PCIMINFREQ/div));
-+		else
-+			ASSERT(0);
-+	} else {
-+		/* Chipc rev 10 is InstaClock */
-+		div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
-+		div = 4 * (div + 1);
-+		return (max ? XTALMAXFREQ : (XTALMINFREQ/div));
++		ASSERT(di->txp[txout] == NULL);
++
++		txout = NEXTTXD(txout);
 +	}
-+	return (0);
-+}
 +
-+static void
-+sb_clkctl_setdelay(sb_info_t *si, void *chipcregs)
-+{
-+	chipcregs_t * cc;
-+	uint slowmaxfreq, pll_delay, slowclk;
-+	uint pll_on_delay, fref_sel_delay;
++	/* if last txd eof not set, fix it */
++	if (!(flags & D64_CTRL1_EOF))
++		W_SM(&di->txd64[PREVTXD(txout)].ctrl1, BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
 +
-+	pll_delay = PLL_DELAY;
++	/* save the packet */
++	di->txp[PREVTXD(txout)] = p0;
 +
-+	/* If the slow clock is not sourced by the xtal then add the xtal_on_delay
-+	 * since the xtal will also be powered down by dynamic clk control logic.
-+	 */
-+	slowclk = sb_slowclk_src(si);
-+	if (slowclk != SCC_SS_XTAL)
-+		pll_delay += XTAL_ON_DELAY;
++	/* bump the tx descriptor index */
++	di->txout = txout;
 +
-+	/* Starting with 4318 it is ILP that is used for the delays */
-+	slowmaxfreq = sb_slowclk_freq(si, (si->sb.ccrev >= 10) ? FALSE : TRUE);
++	/* kick the chip */
++	W_REG(&di->d64txregs->ptr, I2B(txout, dma64dd_t));
 +
-+	pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
-+	fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
++	/* tx flow control */
++	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
 +
-+	cc = (chipcregs_t *)chipcregs;
-+	W_REG(&cc->pll_on_delay, pll_on_delay);
-+	W_REG(&cc->fref_sel_delay, fref_sel_delay);
++	return (0);
++
++outoftxd:
++	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
++	PKTFREE(di->osh, p0, TRUE);
++	di->txavail = 0;
++	di->hnddma.txnobuf++;
++	return (-1);
 +}
 +
-+int
-+sb_pwrctl_slowclk(void *sbh, bool set, uint *div)
++static void*
++dma64_getnexttxp(dma_info_t *di, bool forceall)
 +{
-+	sb_info_t *si;
-+	uint origidx;
-+	chipcregs_t *cc;
-+	uint intr_val = 0;
-+	uint err = 0;
-+	
-+	si = SB_INFO(sbh);
++	uint start, end, i;
++	void *txp;
 +
-+	/* chipcommon cores prior to rev6 don't support slowclkcontrol */
-+	if (si->sb.ccrev < 6)
-+		return 1;
++	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));
 +
-+	/* chipcommon cores rev10 are a whole new ball game */
-+	if (si->sb.ccrev >= 10)
-+		return 1;
++	txp = NULL;
 +
-+	if (set && ((*div % 4) || (*div < 4)))
-+		return 2;
-+	
-+	INTR_OFF(si, intr_val);
-+	origidx = si->curidx;
-+	cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0);
-+	ASSERT(cc != NULL);
-+	
-+	if (!(R_REG(&cc->capabilities) & CAP_PWR_CTL)) {
-+		err = 3;
-+		goto done;
++	start = di->txin;
++	if (forceall)
++		end = di->txout;
++	else
++		end = B2I(R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t);
++
++	if ((start == 0) && (end > di->txout))
++		goto bogus;
++
++	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
++		DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) - di->dataoffsetlow),
++			  (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) & D64_CTRL2_BC_MASK), DMA_TX, di->txp[i]);
++
++		W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
++		W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
++
++		txp = di->txp[i];
++		di->txp[i] = NULL;
 +	}
 +
-+	if (set) {
-+		SET_REG(&cc->slow_clk_ctl, SCC_CD_MASK, ((*div / 4 - 1) << SCC_CD_SHIFT));
-+		sb_clkctl_setdelay(sbh, (void *)cc);
-+	} else
-+		*div = 4 * (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
++	di->txin = i;
++
++	/* tx flow control */
++	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
 +
-+done:
-+	sb_setcoreidx(sbh, origidx);
-+	INTR_RESTORE(si, intr_val);
-+	return err;
++	return (txp);
++
++bogus:
++/*
++	DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
++		start, end, di->txout, forceall));
++*/
++	return (NULL);
 +}
 +
-+/* initialize power control delay registers */
-+void sb_clkctl_init(sb_t *sbh)
++static void *
++dma64_getnextrxp(dma_info_t *di, bool forceall)
 +{
-+	sb_info_t *si;
-+	uint origidx;
-+	chipcregs_t *cc;
++	uint i;
++	void *rxp;
 +
-+	si = SB_INFO(sbh);
++	/* if forcing, dma engine must be disabled */
++	ASSERT(!forceall || !dma_rxenabled(di));
 +
-+	origidx = si->curidx;
++	i = di->rxin;
 +
-+	if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
-+		return;
++	/* return if no packets posted */
++	if (i == di->rxout)
++		return (NULL);
 +
-+	if (!(R_REG(&cc->capabilities) & CAP_PWR_CTL))
-+		goto done;
++	/* ignore curr if forceall */
++	if (!forceall && (i == B2I(R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK, dma64dd_t)))
++		return (NULL);
 +
-+	/* 4317pc does not work with SlowClock less than 5 MHz */
-+	if ((BUSTYPE(si->sb.bustype) == PCMCIA_BUS) && (si->sb.ccrev >= 6) && (si->sb.ccrev < 10))
-+		SET_REG(&cc->slow_clk_ctl, SCC_CD_MASK, (ILP_DIV_5MHZ << SCC_CD_SHIFT));
++	/* get the packet pointer that corresponds to the rx descriptor */
++	rxp = di->rxp[i];
++	ASSERT(rxp);
++	di->rxp[i] = NULL;
 +
-+	/* set all Instaclk chip ILP to 1 MHz */
-+	else if (si->sb.ccrev >= 10)
-+		SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK, (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
-+	
-+	sb_clkctl_setdelay(si, (void *)cc);
++	/* clear this packet from the descriptor ring */
++	DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) - di->dataoffsetlow),
++		  di->rxbufsize, DMA_RX, rxp);
 +
-+done:
-+	sb_setcoreidx(sbh, origidx);
-+}
-+void sb_pwrctl_init(sb_t *sbh)
-+{
-+sb_clkctl_init(sbh);
++	W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
++	W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
++
++	di->rxin = NEXTRXD(i);
++
++	return (rxp);
 +}
-+/* return the value suitable for writing to the dot11 core FAST_PWRUP_DELAY register */
-+uint16
-+sb_clkctl_fast_pwrup_delay(sb_t *sbh)
++
++static void
++dma64_txrotate(di_t *di)
 +{
-+	sb_info_t *si;
-+	uint origidx;
-+	chipcregs_t *cc;
-+	uint slowminfreq;
-+	uint16 fpdelay;
-+	uint intr_val = 0;
++	uint ad;
++	uint nactive;
++	uint rot;
++	uint old, new;
++	uint32 w;
++	uint first, last;
 +
-+	si = SB_INFO(sbh);
-+	fpdelay = 0;
-+	origidx = si->curidx;
++	ASSERT(dma_txsuspendedidle(di));
 +
-+	INTR_OFF(si, intr_val);
++	nactive = dma_txactive(di);
++	ad = B2I((R_REG(&di->d64txregs->status1) & D64_XS1_AD_MASK), dma64dd_t);
++	rot = TXD(ad - di->txin);
 +
-+	if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
-+		goto done;
++	ASSERT(rot < di->ntxd);
 +
-+	if (!(R_REG(&cc->capabilities) & CAP_PWR_CTL))
-+		goto done;
++	/* full-ring case is a lot harder - don't worry about this */
++	if (rot >= (di->ntxd - nactive)) {
++		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
++		return;
++	}
 +
-+	slowminfreq = sb_slowclk_freq(si, FALSE);
-+	fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) + (slowminfreq - 1)) / slowminfreq;
++	first = di->txin;
++	last = PREVTXD(di->txout);
 +
-+done:
-+	sb_setcoreidx(sbh, origidx);
-+	INTR_RESTORE(si, intr_val);
-+	return (fpdelay);
-+}
-+uint16 sb_pwrctl_fast_pwrup_delay(sb_t *sbh)
-+{
-+return sb_clkctl_fast_pwrup_delay(sbh);
-+}
-+/* turn primary xtal and/or pll off/on */
-+int
-+sb_clkctl_xtal(sb_t *sbh, uint what, bool on)
-+{
-+	sb_info_t *si;
-+	uint32 in, out, outen;
++	/* move entries starting at last and moving backwards to first */
++	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
++		new = TXD(old + rot);
 +
-+	si = SB_INFO(sbh);
++		/*
++		 * Move the tx dma descriptor.
++		 * EOT is set only in the last entry in the ring.
++		 */
++		w = R_SM(&di->txd64[old].ctrl1) & ~D64_CTRL1_EOT;
++		if (new == (di->ntxd - 1))
++			w |= D64_CTRL1_EOT;
++		W_SM(&di->txd64[new].ctrl1, w);
 +
-+	switch (BUSTYPE(si->sb.bustype)) {
++		w = R_SM(&di->txd64[old].ctrl2);
++		W_SM(&di->txd64[new].ctrl2, w);
 +
++		W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
++		W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));
 +
-+		case PCMCIA_BUS:
-+			return (0);
++		/* zap the old tx dma descriptor address field */
++		W_SM(&di->txd64[old].addrlow, 0xdeadbeef);
++		W_SM(&di->txd64[old].addrhigh, 0xdeadbeef);
 +
++		/* move the corresponding txp[] entry */
++		ASSERT(di->txp[new] == NULL);
++		di->txp[new] = di->txp[old];
++		di->txp[old] = NULL;
++	}
 +
-+		case PCI_BUS:
++	/* update txin and txout */
++	di->txin = ad;
++	di->txout = TXD(di->txout + rot);
++	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
 +
-+			/* pcie core doesn't have any mapping to control the xtal pu */
-+			if (PCIE(si))
-+				return -1;
++	/* kick the chip */
++	W_REG(&di->d64txregs->ptr, I2B(di->txout, dma64dd_t));
++}
 +
-+			in = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_IN, sizeof (uint32));
-+			out = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32));
-+			outen = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof (uint32));
++#endif
 +
-+			/*
-+			 * Avoid glitching the clock if GPRS is already using it.
-+			 * We can't actually read the state of the PLLPD so we infer it
-+			 * by the value of XTAL_PU which *is* readable via gpioin.
-+			 */
-+			if (on && (in & PCI_CFG_GPIO_XTAL))
-+				return (0);
+diff -Naur linux.old/drivers/net/wl2/hnddma.h linux.dev/drivers/net/wl2/hnddma.h
+--- linux.old/drivers/net/wl2/hnddma.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/wl2/hnddma.h	2006-04-06 16:32:44.000000000 +0200
+@@ -0,0 +1,71 @@
++/*
++ * Generic Broadcom Home Networking Division (HND) DMA engine SW interface
++ * This supports the following chips: BCM42xx, 44xx, 47xx.
++ *
++ * Copyright 2005, Broadcom Corporation      
++ * All Rights Reserved.      
++ *       
++ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY      
++ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM      
++ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS      
++ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
++ * $Id$
++ */
 +
-+			if (what & XTAL)
-+				outen |= PCI_CFG_GPIO_XTAL;
-+			if (what & PLL)
-+				outen |= PCI_CFG_GPIO_PLL;
++#ifndef	_hnddma_h_
++#define	_hnddma_h_
 +
-+			if (on) {
-+				/* turn primary xtal on */
-+				if (what & XTAL) {
-+					out |= PCI_CFG_GPIO_XTAL;
-+					if (what & PLL)
-+						out |= PCI_CFG_GPIO_PLL;
-+					OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32), out);
-+					OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof (uint32), outen);
-+					OSL_DELAY(XTAL_ON_DELAY);
-+				}
++/* export structure */
++typedef volatile struct {
++	/* rx error counters */
++	uint		rxgiants;	/* rx giant frames */
++	uint		rxnobuf;	/* rx out of dma descriptors */
++	/* tx error counters */
++	uint		txnobuf;	/* tx out of dma descriptors */
++} hnddma_t;
 +
-+				/* turn pll on */
-+				if (what & PLL) {
-+					out &= ~PCI_CFG_GPIO_PLL;
-+					OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32), out);
-+					OSL_DELAY(2000);
-+				}
-+			} else {
-+				if (what & XTAL)
-+					out &= ~PCI_CFG_GPIO_XTAL;
-+				if (what & PLL)
-+					out |= PCI_CFG_GPIO_PLL;
-+				OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT, sizeof (uint32), out);
-+				OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof (uint32), outen);
-+			}
++#ifndef di_t
++#define	di_t	void
++#endif
 +
-+		default:
-+			return (-1);
-+	}
++#ifndef osl_t 
++#define osl_t void
++#endif
 +
-+	return (0);
-+}
++/* externs */
++extern void * dma_attach(osl_t *osh, char *name, sb_t *sbh, void *dmaregstx, void *dmaregsrx, 
++			 uint ntxd, uint nrxd, uint rxbufsize, uint nrxpost, uint rxoffset, uint *msg_level);
++extern void dma_detach(di_t *di);
++extern void dma_txreset(di_t *di);
++extern void dma_rxreset(di_t *di);
++extern void dma_txinit(di_t *di);
++extern bool dma_txenabled(di_t *di);
++extern void dma_rxinit(di_t *di);
++extern void dma_rxenable(di_t *di);
++extern bool dma_rxenabled(di_t *di);
++extern void dma_txsuspend(di_t *di);
++extern void dma_txresume(di_t *di);
++extern bool dma_txsuspended(di_t *di);
++extern bool dma_txsuspendedidle(di_t *di);
++extern bool dma_txstopped(di_t *di);
++extern bool dma_rxstopped(di_t *di);
++extern int dma_txfast(di_t *di, void *p, uint32 coreflags);
++extern void dma_fifoloopbackenable(di_t *di);
++extern void *dma_rx(di_t *di);
++extern void dma_rxfill(di_t *di);
++extern void dma_txreclaim(di_t *di, bool forceall);
++extern void dma_rxreclaim(di_t *di);
++extern uintptr dma_getvar(di_t *di, char *name);
++extern void *dma_getnexttxp(di_t *di, bool forceall);
++extern void *dma_peeknexttxp(di_t *di);
++extern void *dma_getnextrxp(di_t *di, bool forceall);
++extern void dma_txblock(di_t *di);
++extern void dma_txunblock(di_t *di);
++extern uint dma_txactive(di_t *di);
++extern void dma_txrotate(di_t *di);
 +
-+int sb_pwrctl_xtal(sb_t *sbh, uint what, bool on)
-+{
-+return sb_clkctl_xtal(sbh,what,on);
-+}
++extern void dma_rxpiomode(dma32regs_t *);
++extern void dma_txpioloopback(dma32regs_t *);
 +
-+/* set dynamic clk control mode (forceslow, forcefast, dynamic) */
-+/*   returns true if ignore pll off is set and false if it is not */
-+bool
-+sb_clkctl_clk(sb_t *sbh, uint mode)
-+{
-+	sb_info_t *si;
-+	uint origidx;
-+	chipcregs_t *cc;
-+	uint32 scc;
-+	bool forcefastclk=FALSE;
-+	uint intr_val = 0;
 +
-+	si = SB_INFO(sbh);
++#endif	/* _hnddma_h_ */
+diff -Naur linux.old/drivers/net/wl2/pktq.h linux.dev/drivers/net/wl2/pktq.h
+--- linux.old/drivers/net/wl2/pktq.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/wl2/pktq.h	2006-04-06 17:33:02.000000000 +0200
+@@ -0,0 +1,83 @@
++/*
++ * Misc useful os-independent macros and functions.
++ *
++ * Copyright 2005, Broadcom Corporation
++ * All Rights Reserved.
++ * 
++ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
++ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
++ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
++ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
++ * $Id$
++ */
 +
-+	/* chipcommon cores prior to rev6 don't support dynamic clock control */
-+	if (si->sb.ccrev < 6)
-+		return (FALSE);
++#ifndef	_pktq_h_
++#define	_pktq_h_
 +
-+	/* chipcommon cores rev10 are a whole new ball game */
-+	if (si->sb.ccrev >= 10)
-+		return (FALSE);
++/*** driver-only section ***/
++#ifdef BCMDRIVER
 +
-+	INTR_OFF(si, intr_val);
++/* osl packet chain functions */
++extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
++extern uint pkttotlen(osl_t *osh, void *);
 +
-+	origidx = si->curidx;
++#define pktenq(pq, p)		pktq_penq((pq), 0, (p))		/* legacy */
++#define pktdeq(pq)		pktq_pdeq((pq), 0)		/* legacy */
 +
-+	cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0);
-+	ASSERT(cc != NULL);
++/* osl multi-precedence packet queue */
 +
-+	if (!(R_REG(&cc->capabilities) & CAP_PWR_CTL))
-+		goto done;
++#define PKTQ_LEN_DEFAULT        128
++#define PKTQ_MAX_PREC           8
++struct pktq_prec {
++	void *head;     /* first packet to dequeue */
++	void *tail;     /* last packet to dequeue */
++	uint16 len;     /* number of queued packets */
++	uint16 max;     /* maximum number of queued packets */
++};
 +
-+	switch (mode) {
-+	case CLK_FAST:	/* force fast (pll) clock */
-+		/* don't forget to force xtal back on before we clear SCC_DYN_XTAL.. */
-+		sb_clkctl_xtal(&si->sb, XTAL, ON);
++struct pktq {
++	struct pktq_prec q[PKTQ_MAX_PREC];
++	uint16 num_prec;        /* number of precedences in use */
++	uint16 hi_prec;         /* rapid dequeue hint (>= highest non-empty prec) */
++	uint16 max;             /* total max packets */
++	uint16 len;             /* total number of packets */
++};
 +
-+		SET_REG(&cc->slow_clk_ctl, (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
-+		break;
++/* operations on a specific precedence in packet queue */
 +
-+	case CLK_DYNAMIC:	/* enable dynamic clock control */
-+		scc = R_REG(&cc->slow_clk_ctl);
-+		scc &= ~(SCC_FS | SCC_IP | SCC_XC);
-+		if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
-+			scc |= SCC_XC;
-+		W_REG(&cc->slow_clk_ctl, scc);
++#define pktq_psetmax(pq, prec, _max)    ((pq)->q[prec].max = (_max))
++#define pktq_plen(pq, prec)             ((pq)->q[prec].len)
++#define pktq_pavail(pq, prec)           ((pq)->q[prec].max - (pq)->q[prec].len)
++#define pktq_pfull(pq, prec)            ((pq)->q[prec].len >= (pq)->q[prec].max)
++#define pktq_pempty(pq, prec)           ((pq)->q[prec].len == 0)
 +
-+		/* for dynamic control, we have to release our xtal_pu "force on" */
-+		if (scc & SCC_XC)
-+			sb_clkctl_xtal(&si->sb, XTAL, OFF);
-+		break;
++#define pktq_ppeek(pq, prec)            ((pq)->q[prec].head)
++#define pktq_ppeek_tail(pq, prec)       ((pq)->q[prec].tail)
 +
-+	default:
-+		ASSERT(0);
-+	}
++extern void *pktq_penq(struct pktq *pq, int prec, void *p);
++extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
++extern void *pktq_pdeq(struct pktq *pq, int prec);
++extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
 +
-+	/* Is the h/w forcing the use of the fast clk */
-+	forcefastclk = (bool)((R_REG(&cc->slow_clk_ctl) & SCC_IP) == SCC_IP);
++/* operations on packet queue as a whole */
 +
-+done:
-+	sb_setcoreidx(sbh, origidx);
-+	INTR_RESTORE(si, intr_val);
-+	return (forcefastclk);
-+}
++extern void pktq_init(struct pktq *pq, int num_prec, int max);
 +
-+bool sb_pwrctl_clk(sb_t *sbh, uint mode)
-+{
-+return sb_clkctl_clk(sbh, mode);
-+}
-+/* register driver interrupt disabling and restoring callback functions */
-+void
-+sb_register_intr_callback(sb_t *sbh, void *intrsoff_fn, void *intrsrestore_fn, void *intrsenabled_fn, void *intr_arg)
-+{
-+	sb_info_t *si;
++#define pktq_len(pq)                    ((int)(pq)->len)
++#define pktq_max(pq)                    ((int)(pq)->max)
++#define pktq_avail(pq)                  ((int)((pq)->max - (pq)->len))
++#define pktq_full(pq)                   ((pq)->len >= (pq)->max)
++#define pktq_empty(pq)                  ((pq)->len == 0)
 +
-+	si = SB_INFO(sbh);
-+	si->intr_arg = intr_arg;
-+	si->intrsoff_fn = (sb_intrsoff_t)intrsoff_fn;
-+	si->intrsrestore_fn = (sb_intrsrestore_t)intrsrestore_fn;
-+	si->intrsenabled_fn = (sb_intrsenabled_t)intrsenabled_fn;
-+	/* save current core id.  when this function called, the current core
-+	 * must be the core which provides driver functions(il, et, wl, etc.)
-+	 */
-+	si->dev_coreid = si->coreid[si->curidx];
-+}
++extern void *pktq_deq(struct pktq *pq, int *prec_out);
++extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
++extern void *pktq_peek(struct pktq *pq, int *prec_out);
++extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
 +
++extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
++extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
 +
-+void
-+sb_corepciid(sb_t *sbh, uint16 *pcivendor, uint16 *pcidevice, 
-+	uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif)
-+{
-+	uint vendor, core, unit;
-+	uint chip, chippkg;
-+	char varname[8];
-+	uint8 class, subclass, progif;
-+	
-+	vendor = sb_corevendor(sbh);
-+	core = sb_coreid(sbh);
-+	unit = sb_coreunit(sbh);
++#define PKTQ_PREC_ITER(pq, prec)        for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
 +
-+	chip = BCMINIT(sb_chip)(sbh);
-+	chippkg = BCMINIT(sb_chippkg)(sbh);
++#endif
++#endif	/* _pktq_h_ */
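pktq.h only declares the queue; the pktq_penq()/pktq_pdeq() routines live in a source file that is not part of this hunk. As a rough standalone illustration of the multi-precedence idea (higher precedence drains first), with invented pkt/prec types rather than the driver's osl packets:

/* Minimal multi-precedence queue sketch; not the driver's implementation. */
#include <stdio.h>
#include <stddef.h>

#define NPREC 8

struct pkt  { struct pkt *next; int id; };
struct prec { struct pkt *head, *tail; };
struct pq   { struct prec q[NPREC]; int len; };

static void penq(struct pq *pq, int prec, struct pkt *p)
{
	p->next = NULL;
	if (pq->q[prec].tail)
		pq->q[prec].tail->next = p;
	else
		pq->q[prec].head = p;
	pq->q[prec].tail = p;
	pq->len++;
}

static struct pkt *deq(struct pq *pq)          /* highest precedence first */
{
	for (int prec = NPREC - 1; prec >= 0; prec--) {
		struct pkt *p = pq->q[prec].head;
		if (p) {
			pq->q[prec].head = p->next;
			if (!p->next)
				pq->q[prec].tail = NULL;
			pq->len--;
			return p;
		}
	}
	return NULL;
}

int main(void)
{
	struct pq pq = {0};
	struct pkt a = {NULL, 1}, b = {NULL, 2};
	penq(&pq, 0, &a);                       /* best-effort traffic */
	penq(&pq, 7, &b);                       /* highest precedence */
	printf("first out: %d\n", deq(&pq)->id);   /* prints 2 */
	return 0;
}

The driver's struct pktq additionally caches hi_prec as a hint, presumably so the dequeue paths do not have to scan all eight precedences from the top every time.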
+diff -Naur linux.old/drivers/net/wl2/sbhnddma.h linux.dev/drivers/net/wl2/sbhnddma.h
+--- linux.old/drivers/net/wl2/sbhnddma.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/net/wl2/sbhnddma.h	2006-04-06 16:32:44.000000000 +0200
+@@ -0,0 +1,312 @@
++/*
++ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
++ * This supports the following chips: BCM42xx, 44xx, 47xx.
++ *
++ * Copyright 2005, Broadcom Corporation      
++ * All Rights Reserved.      
++ *       
++ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY      
++ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM      
++ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS      
++ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
++ * $Id$
++ */
 +
-+	progif = 0;
-+	
-+	/* Known vendor translations */
-+	switch (vendor) {
-+	case SB_VEND_BCM:
-+		vendor = VENDOR_BROADCOM;
-+		break;
-+	}
++#ifndef	_sbhnddma_h_
++#define	_sbhnddma_h_
 +
-+	/* Determine class based on known core codes */
-+	switch (core) {
-+	case SB_ILINE20:
-+		class = PCI_CLASS_NET;
-+		subclass = PCI_NET_ETHER;
-+		core = BCM47XX_ILINE_ID;
-+		break;
-+	case SB_ENET:
-+		class = PCI_CLASS_NET;
-+		subclass = PCI_NET_ETHER;
-+		core = BCM47XX_ENET_ID;
-+		break;
-+	case SB_SDRAM:
-+	case SB_MEMC:
-+		class = PCI_CLASS_MEMORY;
-+		subclass = PCI_MEMORY_RAM;
-+		break;
-+	case SB_PCI:
-+	case SB_PCIE:
-+		class = PCI_CLASS_BRIDGE;
-+		subclass = PCI_BRIDGE_PCI;
-+		break;
-+	case SB_MIPS:
-+	case SB_MIPS33:
-+		class = PCI_CLASS_CPU;
-+		subclass = PCI_CPU_MIPS;
-+		break;
-+	case SB_CODEC:
-+		class = PCI_CLASS_COMM;
-+		subclass = PCI_COMM_MODEM;
-+		core = BCM47XX_V90_ID;
-+		break;
-+	case SB_USB:
-+		class = PCI_CLASS_SERIAL;
-+		subclass = PCI_SERIAL_USB;
-+		progif = 0x10; /* OHCI */
-+		core = BCM47XX_USB_ID;
-+		break;
-+	case SB_USB11H:
-+		class = PCI_CLASS_SERIAL;
-+		subclass = PCI_SERIAL_USB;
-+		progif = 0x10; /* OHCI */
-+		core = BCM47XX_USBH_ID;
-+		break;
-+	case SB_USB11D:
-+		class = PCI_CLASS_SERIAL;
-+		subclass = PCI_SERIAL_USB;
-+		core = BCM47XX_USBD_ID;
-+		break;
-+	case SB_IPSEC:
-+		class = PCI_CLASS_CRYPT;
-+		subclass = PCI_CRYPT_NETWORK;
-+		core = BCM47XX_IPSEC_ID;
-+		break;
-+	case SB_ROBO:
-+		class = PCI_CLASS_NET;
-+		subclass = PCI_NET_OTHER;
-+		core = BCM47XX_ROBO_ID;
-+		break;
-+	case SB_EXTIF:
-+	case SB_CC:
-+		class = PCI_CLASS_MEMORY;
-+		subclass = PCI_MEMORY_FLASH;
-+		break;
-+	case SB_D11:
-+		class = PCI_CLASS_NET;
-+		subclass = PCI_NET_OTHER;
-+		/* Let an nvram variable override this */
-+		sprintf(varname, "wl%did", unit);
-+		if ((core = getintvar(NULL, varname)) == 0) {
-+			if (chip == BCM4712_DEVICE_ID) {
-+				if (chippkg == BCM4712SMALL_PKG_ID)
-+					core = BCM4306_D11G_ID;
-+				else
-+					core = BCM4306_D11DUAL_ID;
-+			}
-+		}
-+		break;
++ 
++/* 2byte-wide pio register set per channel(xmt or rcv) */
++typedef volatile struct {
++	uint16	fifocontrol;
++	uint16	fifodata;
++	uint16	fifofree;	/* only valid in xmt channel, not in rcv channel */
++	uint16	PAD;
++} pio2regs_t;
 +
-+	default:
-+		class = subclass = progif = 0xff;
-+		break;
-+	}
++/* a pair of pio channels(tx and rx) */
++typedef volatile struct {
++	pio2regs_t	tx;
++	pio2regs_t	rx;
++} pio2regp_t;
 +
-+	*pcivendor = (uint16)vendor;
-+	*pcidevice = (uint16)core;
-+	*pciclass = class;
-+	*pcisubclass = subclass;
-+	*pciprogif = progif;
-+}
++/* 4byte-wide pio register set per channel(xmt or rcv) */
++typedef volatile struct {
++	uint32	fifocontrol;
++	uint32	fifodata;
++} pio4regs_t;
 +
++/* a pair of pio channels(tx and rx) */
++typedef volatile struct {
++	pio4regs_t	tx;
++	pio4regs_t	rx;
++} pio4regp_t;
 +
 +
 +
-+/* use the mdio interface to write to mdio slaves */
-+static int 
-+sb_pcie_mdiowrite(sb_info_t *si,  uint physmedia, uint regaddr, uint val)
-+{
-+	uint mdiodata;
-+	uint i = 0;
-+	sbpcieregs_t *pcieregs;
++/* DMA structure:
++ *  support two DMA engines: 32 bits address or 64 bit addressing
++ *  basic DMA register set is per channel(transmit or receive)
++ *  a pair of channels is defined for convenience
++ */
 +
-+	pcieregs = (sbpcieregs_t*) sb_setcoreidx(&si->sb, si->sb.buscoreidx);
-+	ASSERT (pcieregs);
 +
-+	/* enable mdio access to SERDES */		
-+	W_REG((&pcieregs->mdiocontrol), MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
++/*** 32 bits addressing ***/ 
 +
-+	mdiodata = MDIODATA_START | MDIODATA_WRITE | 
-+		(physmedia << MDIODATA_DEVADDR_SHF) |
-+		(regaddr << MDIODATA_REGADDR_SHF) | MDIODATA_TA | val;
++/* dma registers per channel(xmt or rcv) */
++typedef volatile struct {
++	uint32	control;		/* enable, et al */
++	uint32	addr;			/* descriptor ring base address (4K aligned) */
++	uint32	ptr;			/* last descriptor posted to chip */
++	uint32	status;			/* current active descriptor, et al */
++} dma32regs_t;
 +
-+	W_REG((&pcieregs->mdiodata), mdiodata);
++typedef volatile struct {
++	dma32regs_t	xmt;		/* dma tx channel */
++	dma32regs_t	rcv;		/* dma rx channel */
++} dma32regp_t;
 +
-+	PR28829_DELAY();
++typedef volatile struct {	/* diag access */
++	uint32	fifoaddr;		/* diag address */
++	uint32	fifodatalow;		/* low 32bits of data */
++	uint32	fifodatahigh;		/* high 32bits of data */
++	uint32	pad;			/* reserved */
++} dma32diag_t;
 +
-+	/* retry till the transaction is complete */
-+	while ( i < 10 ) {
-+		if (R_REG(&(pcieregs->mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
-+			/* Disable mdio access to SERDES */		
-+			W_REG((&pcieregs->mdiocontrol), 0);
-+			return 0;
-+		}
-+		OSL_DELAY(1000);
-+		i++;
-+	}
++/*
++ * DMA Descriptor
++ * Descriptors are only read by the hardware, never written back.
++ */
++typedef volatile struct {
++	uint32	ctrl;		/* misc control bits & bufcount */
++	uint32	addr;		/* data buffer address */
++} dma32dd_t;
 +
-+	SB_ERROR(("sb_pcie_mdiowrite: timed out\n"));
-+	/* Disable mdio access to SERDES */		
-+	W_REG((&pcieregs->mdiocontrol), 0);
-+	ASSERT(0);
-+	return 1; 
++/*
++ * Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page.
++ */
++#define	D32MAXRINGSZ	4096
++#define	D32RINGALIGN	4096
++#define	D32MAXDD	(D32MAXRINGSZ / sizeof (dma32dd_t))
 +
-+}
++/* transmit channel control */
++#define	XC_XE		((uint32)1 << 0)	/* transmit enable */
++#define	XC_SE		((uint32)1 << 1)	/* transmit suspend request */
++#define	XC_LE		((uint32)1 << 2)	/* loopback enable */
++#define	XC_FL		((uint32)1 << 4)	/* flush request */
++#define	XC_AE		((uint32)3 << 16)	/* address extension bits */
++#define	XC_AE_SHIFT	16
 +
-+/* indirect way to read pcie config regs*/
-+uint 
-+sb_pcie_readreg(void *sb, void* arg1, uint offset)
-+{
-+	sb_info_t *si;
-+	sb_t   *sbh;
-+	uint retval = 0xFFFFFFFF;
-+	sbpcieregs_t *pcieregs;	
-+	uint addrtype;
++/* transmit descriptor table pointer */
++#define	XP_LD_MASK	0xfff			/* last valid descriptor */
 +
-+	sbh = (sb_t *)sb;
-+	si = SB_INFO(sbh);
-+	ASSERT (PCIE(si)); 
++/* transmit channel status */
++#define	XS_CD_MASK	0x0fff			/* current descriptor pointer */
++#define	XS_XS_MASK	0xf000			/* transmit state */
++#define	XS_XS_SHIFT	12
++#define	XS_XS_DISABLED	0x0000			/* disabled */
++#define	XS_XS_ACTIVE	0x1000			/* active */
++#define	XS_XS_IDLE	0x2000			/* idle wait */
++#define	XS_XS_STOPPED	0x3000			/* stopped */
++#define	XS_XS_SUSP	0x4000			/* suspend pending */
++#define	XS_XE_MASK	0xf0000			/* transmit errors */
++#define	XS_XE_SHIFT	16
++#define	XS_XE_NOERR	0x00000			/* no error */
++#define	XS_XE_DPE	0x10000			/* descriptor protocol error */
++#define	XS_XE_DFU	0x20000			/* data fifo underrun */
++#define	XS_XE_BEBR	0x30000			/* bus error on buffer read */
++#define	XS_XE_BEDA	0x40000			/* bus error on descriptor access */
++#define	XS_AD_MASK	0xfff00000		/* active descriptor */
++#define	XS_AD_SHIFT	20
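The transmit status word packs four fields into one 32-bit register; a quick standalone decode using the masks above (the register value is made up for illustration):

/* Decoding a transmit status word with the XS_* masks. */
#include <stdio.h>

#define XS_CD_MASK  0x0fff
#define XS_XS_MASK  0xf000
#define XS_XS_SHIFT 12
#define XS_XE_MASK  0xf0000
#define XS_XE_SHIFT 16
#define XS_AD_MASK  0xfff00000
#define XS_AD_SHIFT 20

int main(void)
{
	unsigned status = 0x05812058;   /* hypothetical register read */
	printf("cd=%#x state=%u err=%u ad=%#x\n",
	       status & XS_CD_MASK,
	       (status & XS_XS_MASK) >> XS_XS_SHIFT,
	       (status & XS_XE_MASK) >> XS_XE_SHIFT,
	       (status & XS_AD_MASK) >> XS_AD_SHIFT);
	return 0;
}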
 +
-+	pcieregs = (sbpcieregs_t *)sb_setcore(sbh, SB_PCIE, 0);
-+	ASSERT (pcieregs);
++/* receive channel control */
++#define	RC_RE		((uint32)1 << 0)	/* receive enable */
++#define	RC_RO_MASK	0xfe			/* receive frame offset */
++#define	RC_RO_SHIFT	1
++#define	RC_FM		((uint32)1 << 8)	/* direct fifo receive (pio) mode */
++#define	RC_AE		((uint32)3 << 16)	/* address extension bits */
++#define	RC_AE_SHIFT	16
 +
-+	addrtype = (uint)((uintptr)arg1);
-+	switch(addrtype) {
-+		case PCIE_CONFIGREGS:
-+			W_REG((&pcieregs->configaddr),offset);
-+			retval = R_REG(&(pcieregs->configdata));
-+			break;
-+		case PCIE_PCIEREGS:
-+			W_REG(&(pcieregs->pcieaddr),offset);
-+			retval = R_REG(&(pcieregs->pciedata));
-+			break;
-+		default:
-+			ASSERT(0); 
-+			break;
-+	}
-+	return retval;
-+}
++/* receive descriptor table pointer */
++#define	RP_LD_MASK	0xfff			/* last valid descriptor */
 +
-+/* indirect way to write pcie config/mdio/pciecore regs*/
-+uint 
-+sb_pcie_writereg(sb_t *sbh, void *arg1,  uint offset, uint val)
-+{
-+	sb_info_t *si;
-+	sbpcieregs_t *pcieregs;	
-+	uint addrtype;
++/* receive channel status */
++#define	RS_CD_MASK	0x0fff			/* current descriptor pointer */
++#define	RS_RS_MASK	0xf000			/* receive state */
++#define	RS_RS_SHIFT	12
++#define	RS_RS_DISABLED	0x0000			/* disabled */
++#define	RS_RS_ACTIVE	0x1000			/* active */
++#define	RS_RS_IDLE	0x2000			/* idle wait */
++#define	RS_RS_STOPPED	0x3000			/* reserved */
++#define	RS_RE_MASK	0xf0000			/* receive errors */
++#define	RS_RE_SHIFT	16
++#define	RS_RE_NOERR	0x00000			/* no error */
++#define	RS_RE_DPE	0x10000			/* descriptor protocol error */
++#define	RS_RE_DFO	0x20000			/* data fifo overflow */
++#define	RS_RE_BEBW	0x30000			/* bus error on buffer write */
++#define	RS_RE_BEDA	0x40000			/* bus error on descriptor access */
++#define	RS_AD_MASK	0xfff00000		/* active descriptor */
++#define	RS_AD_SHIFT	20
 +
-+	si = SB_INFO(sbh);
-+	ASSERT (PCIE(si)); 
++/* fifoaddr */
++#define	FA_OFF_MASK	0xffff			/* offset */
++#define	FA_SEL_MASK	0xf0000			/* select */
++#define	FA_SEL_SHIFT	16
++#define	FA_SEL_XDD	0x00000			/* transmit dma data */
++#define	FA_SEL_XDP	0x10000			/* transmit dma pointers */
++#define	FA_SEL_RDD	0x40000			/* receive dma data */
++#define	FA_SEL_RDP	0x50000			/* receive dma pointers */
++#define	FA_SEL_XFD	0x80000			/* transmit fifo data */
++#define	FA_SEL_XFP	0x90000			/* transmit fifo pointers */
++#define	FA_SEL_RFD	0xc0000			/* receive fifo data */
++#define	FA_SEL_RFP	0xd0000			/* receive fifo pointers */
++#define	FA_SEL_RSD	0xe0000			/* receive frame status data */
++#define	FA_SEL_RSP	0xf0000			/* receive frame status pointers */
 +
-+	pcieregs = (sbpcieregs_t *)sb_setcore(sbh, SB_PCIE, 0);
-+	ASSERT (pcieregs);
++/* descriptor control flags */
++#define	CTRL_BC_MASK	0x1fff			/* buffer byte count */
++#define	CTRL_AE		((uint32)3 << 16)	/* address extension bits */
++#define	CTRL_AE_SHIFT	16
++#define	CTRL_EOT	((uint32)1 << 28)	/* end of descriptor table */
++#define	CTRL_IOC	((uint32)1 << 29)	/* interrupt on completion */
++#define	CTRL_EOF	((uint32)1 << 30)	/* end of frame */
++#define	CTRL_SOF	((uint32)1 << 31)	/* start of frame */
 +
-+	addrtype = (uint)((uintptr)arg1);
++/* control flags in the range [27:20] are core-specific and not defined here */
++#define	CTRL_CORE_MASK	0x0ff00000
 +
-+	switch(addrtype) {
-+		case PCIE_CONFIGREGS:
-+			W_REG((&pcieregs->configaddr),offset);
-+			W_REG((&pcieregs->configdata),val);
-+			break;
-+		case PCIE_PCIEREGS:
-+			W_REG((&pcieregs->pcieaddr),offset);
-+			W_REG((&pcieregs->pciedata),val);
-+			break;
-+		default:
-+			ASSERT(0); 
-+			break;
-+	}
-+	return 0;
-+}
++/*** 64-bit addressing ***/
 +
++/* dma registers per channel(xmt or rcv) */
++typedef volatile struct {
++	uint32	control;		/* enable, et al */
++	uint32	ptr;			/* last descriptor posted to chip */
++	uint32	addrlow;		/* descriptor ring base address low 32-bits (8K aligned) */
++	uint32	addrhigh;		/* descriptor ring base address bits 63:32 (8K aligned) */
++	uint32	status0;		/* current descriptor, xmt state */
++	uint32	status1;		/* active descriptor, xmt error */
++} dma64regs_t;
 +
-+/* Build device path. Support SB, PCI, and JTAG for now. */
-+int
-+sb_devpath(sb_t *sbh, char *path, int size)
-+{
-+	ASSERT(path);
-+	ASSERT(size >= SB_DEVPATH_BUFSZ);
-+	
-+	switch (BUSTYPE((SB_INFO(sbh))->sb.bustype)) {
-+	case SB_BUS:
-+	case JTAG_BUS:
-+		sprintf(path, "sb/%u/", sb_coreidx(sbh));
-+		break;
-+	case PCI_BUS:
-+		ASSERT((SB_INFO(sbh))->osh);
-+		sprintf(path, "pci/%u/%u/", OSL_PCI_BUS((SB_INFO(sbh))->osh),
-+			OSL_PCI_SLOT((SB_INFO(sbh))->osh));
-+		break;
-+	case PCMCIA_BUS:
-+		SB_ERROR(("sb_devpath: OSL_PCMCIA_BUS() not implemented, bus 1 assumed\n"));
-+		SB_ERROR(("sb_devpath: OSL_PCMCIA_SLOT() not implemented, slot 1 assumed\n"));
-+		sprintf(path, "pc/%u/%u/", 1, 1);
-+		break;
-+	case SDIO_BUS:
-+		SB_ERROR(("sb_devpath: device 0 assumed\n"));
-+		sprintf(path, "sd/%u/", sb_coreidx(sbh));
-+		break;
-+	default:
-+		ASSERT(0);
-+		break;
-+	}
++typedef volatile struct {
++	dma64regs_t	tx;		/* dma64 tx channel */
++	dma64regs_t	rx;		/* dma64 rx channel */
++} dma64regp_t;
 +
-+	return 0;
-+}
++typedef volatile struct {		/* diag access */
++	uint32	fifoaddr;		/* diag address */
++	uint32	fifodatalow;		/* low 32 bits of data */
++	uint32	fifodatahigh;		/* high 32 bits of data */
++	uint32	pad;			/* reserved */
++} dma64diag_t;
 +
-+/* Fix chip's configuration. The current core may be changed upon return */
-+static int
-+sb_pci_fixcfg(sb_info_t *si)
-+{
-+	uint origidx, pciidx;
-+	sbpciregs_t *pciregs;
-+	sbpcieregs_t *pcieregs;
-+	uint16 val16, *reg16;
-+	char name[SB_DEVPATH_BUFSZ+16], *value;
-+	char devpath[SB_DEVPATH_BUFSZ];
++/*
++ * DMA Descriptor
++ * Descriptors are only read by the hardware, never written back.
++ */
++typedef volatile struct {
++	uint32	ctrl1;		/* misc control bits & bufcount */
++	uint32	ctrl2;		/* buffer count and address extension */
++	uint32	addrlow;	/* memory address of the first byte of the data buffer, bits 31:0 */
++	uint32	addrhigh;	/* memory address of the first byte of the data buffer, bits 63:32 */
++} dma64dd_t;
 +
-+	ASSERT(BUSTYPE(si->sb.bustype) == PCI_BUS);
++/*
++ * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical address range.
++ */
++#define	D64MAXRINGSZ	8192
++#define	D64RINGALIGN	8192
++#define	D64MAXDD	(D64MAXRINGSZ / sizeof (dma64dd_t))
 +
-+	/* Fix PCI(e) SROM shadow area */
-+	/* save the current index */
-+	origidx = sb_coreidx(&si->sb);
++/* transmit channel control */
++#define	D64_XC_XE		0x00000001	/* transmit enable */
++#define	D64_XC_SE		0x00000002	/* transmit suspend request */
++#define	D64_XC_LE		0x00000004	/* loopback enable */
++#define	D64_XC_FL		0x00000010	/* flush request */
++#define	D64_XC_AE		0x00110000	/* address extension bits */
++#define	D64_XC_AE_SHIFT		16
 +
-+	/* check 'pi' is correct and fix it if not */
-+	if (si->sb.buscoretype == SB_PCIE) {
-+		pcieregs = (sbpcieregs_t *)sb_setcore(&si->sb, SB_PCIE, 0);
-+		ASSERT(pcieregs);
-+		reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
-+	}
-+	else if (si->sb.buscoretype == SB_PCI) {
-+		pciregs = (sbpciregs_t *)sb_setcore(&si->sb, SB_PCI, 0);
-+		ASSERT(pciregs);
-+		reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
-+	}
-+	else {
-+		ASSERT(0);
-+		return -1;
-+	}
-+	pciidx = sb_coreidx(&si->sb);
-+	val16 = R_REG(reg16);
-+	if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (uint16)pciidx) {
-+		val16 = (uint16)(pciidx << SRSH_PI_SHIFT) | (val16 & ~SRSH_PI_MASK);
-+		W_REG(reg16, val16);
-+	}
++/* transmit descriptor table pointer */
++#define	D64_XP_LD_MASK		0x00000fff	/* last valid descriptor */
 +
-+	/* restore the original index */
-+	sb_setcoreidx(&si->sb, origidx);
-+	
-+	/* Fix bar0window */
-+	/* !do it last, it changes the current core! */
-+	if (sb_devpath(&si->sb, devpath, sizeof(devpath)))
-+		return -1;
-+	sprintf(name, "%sb0w", devpath);
-+	if ((value = getvar(NULL, name))) {
-+		OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32),
-+			bcm_strtoul(value, NULL, 16));
-+		/* update curidx since the current core is changed */
-+		si->curidx = _sb_coreidx(si);
-+		if (si->curidx == BADIDX) {
-+			SB_ERROR(("sb_pci_fixcfg: bad core index\n"));
-+			return -1;
-+		}
-+	}
++/* transmit channel status */
++#define	D64_XS0_CD_MASK		0x00001fff	/* current descriptor pointer */
++#define	D64_XS0_XS_MASK		0xf0000000     	/* transmit state */
++#define	D64_XS0_XS_SHIFT		28
++#define	D64_XS0_XS_DISABLED	0x00000000	/* disabled */
++#define	D64_XS0_XS_ACTIVE	0x10000000	/* active */
++#define	D64_XS0_XS_IDLE		0x20000000	/* idle wait */
++#define	D64_XS0_XS_STOPPED	0x30000000	/* stopped */
++#define	D64_XS0_XS_SUSP		0x40000000	/* suspend pending */
 +
-+	return 0;
-+}
++#define	D64_XS1_AD_MASK		0x0001ffff	/* active descriptor */
++#define	D64_XS1_XE_MASK		0xf0000000     	/* transmit errors */
++#define	D64_XS1_XE_SHIFT		28
++#define	D64_XS1_XE_NOERR	0x00000000	/* no error */
++#define	D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
++#define	D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
++#define	D64_XS1_XE_DTE		0x30000000	/* data transfer error */
++#define	D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
++#define	D64_XS1_XE_COREE	0x50000000	/* core error */
 +
-diff -Nur linux-2.4.32/drivers/net/hnd/shared_ksyms.sh linux-2.4.32-brcm/drivers/net/hnd/shared_ksyms.sh
---- linux-2.4.32/drivers/net/hnd/shared_ksyms.sh	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/net/hnd/shared_ksyms.sh	2005-12-16 23:39:11.316860000 +0100
-@@ -0,0 +1,21 @@
-+#!/bin/sh
-+#
-+# Copyright 2004, Broadcom Corporation      
-+# All Rights Reserved.      
-+#       
-+# THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY      
-+# KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM      
-+# SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS      
-+# FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.      
-+#
-+# $Id: shared_ksyms.sh,v 1.1 2005/03/16 13:50:00 wbx Exp $
-+#
++/* receive channel control */
++#define	D64_RC_RE		0x00000001	/* receive enable */
++#define	D64_RC_RO_MASK		0x000000fe	/* receive frame offset */
++#define	D64_RC_RO_SHIFT		1
++#define	D64_RC_FM		0x00000100	/* direct fifo receive (pio) mode */
++#define	D64_RC_AE		0x00110000	/* address extension bits */
++#define	D64_RC_AE_SHIFT		16
 +
-+cat <<EOF
-+#include <linux/config.h>
-+#include <linux/module.h>
-+EOF
++/* receive descriptor table pointer */
++#define	D64_RP_LD_MASK		0x00000fff	/* last valid descriptor */
 +
-+for file in $* ; do
-+    ${NM} $file | sed -ne 's/[0-9A-Fa-f]* [DT] \([^ ]*\)/extern void \1; EXPORT_SYMBOL(\1);/p'
-+done
-diff -Nur linux-2.4.32/drivers/net/Makefile linux-2.4.32-brcm/drivers/net/Makefile
---- linux-2.4.32/drivers/net/Makefile	2005-01-19 15:09:56.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/net/Makefile	2005-12-16 23:39:11.284858000 +0100
-@@ -3,6 +3,8 @@
- # Makefile for the Linux network (ethercard) device drivers.
- #
- 
-+EXTRA_CFLAGS := -I$(TOPDIR)/arch/mips/bcm947xx/include
++/* receive channel status */
++#define	D64_RS0_CD_MASK		0x00001fff	/* current descriptor pointer */
++#define	D64_RS0_RS_MASK		0xf0000000     	/* receive state */
++#define	D64_RS0_RS_SHIFT		28
++#define	D64_RS0_RS_DISABLED	0x00000000	/* disabled */
++#define	D64_RS0_RS_ACTIVE	0x10000000	/* active */
++#define	D64_RS0_RS_IDLE		0x20000000	/* idle wait */
++#define	D64_RS0_RS_STOPPED	0x30000000	/* stopped */
++#define	D64_RS0_RS_SUSP		0x40000000	/* suspend pending */
 +
- obj-y           :=
- obj-m           :=
- obj-n           :=
-@@ -39,6 +41,8 @@
-   obj-$(CONFIG_ISDN) += slhc.o
- endif
- 
-+subdir-$(CONFIG_HND) += hnd
-+subdir-$(CONFIG_WL) += wl
- subdir-$(CONFIG_NET_PCMCIA) += pcmcia
- subdir-$(CONFIG_NET_WIRELESS) += wireless
- subdir-$(CONFIG_TULIP) += tulip
-@@ -69,6 +74,13 @@
- obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o
- obj-$(CONFIG_SUNGEM) += sungem.o
- 
-+ifeq ($(CONFIG_HND),y)
-+  obj-y += hnd/hnd.o
-+endif
-+ifeq ($(CONFIG_WL),y)
-+  obj-y += wl/wl.o
-+endif
++#define	D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
++#define	D64_RS1_RE_MASK		0xf0000000     	/* receive errors */
++#define	D64_RS1_RE_SHIFT		28
++#define	D64_RS1_RE_NOERR	0x00000000	/* no error */
++#define	D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
++#define	D64_RS1_RE_DFU		0x20000000	/* data fifo overflow */
++#define	D64_RS1_RE_DTE		0x30000000	/* data transfer error */
++#define	D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
++#define	D64_RS1_RE_COREE	0x50000000	/* core error */
 +
- obj-$(CONFIG_MACE) += mace.o
- obj-$(CONFIG_BMAC) += bmac.o
- obj-$(CONFIG_GMAC) += gmac.o
-diff -Nur linux-2.4.32/drivers/net/wireless/Config.in linux-2.4.32-brcm/drivers/net/wireless/Config.in
---- linux-2.4.32/drivers/net/wireless/Config.in	2004-11-17 12:54:21.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/net/wireless/Config.in	2005-12-16 23:39:11.364863000 +0100
-@@ -13,6 +13,7 @@
- fi
- 
- if [ "$CONFIG_PCI" = "y" ]; then
-+   dep_tristate '    Proprietary Broadcom BCM43xx 802.11 Wireless support' CONFIG_WL
-    dep_tristate '    Hermes in PLX9052 based PCI adaptor support (Netgear MA301 etc.) (EXPERIMENTAL)' CONFIG_PLX_HERMES $CONFIG_HERMES $CONFIG_EXPERIMENTAL
-    dep_tristate '    Hermes in TMD7160/NCP130 based PCI adaptor support (Pheecom WL-PCI etc.) (EXPERIMENTAL)' CONFIG_TMD_HERMES $CONFIG_HERMES $CONFIG_EXPERIMENTAL
-    dep_tristate '    Prism 2.5 PCI 802.11b adaptor support (EXPERIMENTAL)' CONFIG_PCI_HERMES $CONFIG_HERMES $CONFIG_EXPERIMENTAL
-diff -Nur linux-2.4.32/drivers/net/wl/Makefile linux-2.4.32-brcm/drivers/net/wl/Makefile
---- linux-2.4.32/drivers/net/wl/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/net/wl/Makefile	2005-12-16 23:39:11.364863000 +0100
-@@ -0,0 +1,26 @@
-+#
-+# Makefile for the Broadcom wl driver
-+#
-+# Copyright 2004, Broadcom Corporation
-+# All Rights Reserved.
-+# 
-+# THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
-+# KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
-+# SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
-+# FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
-+#
-+# $Id: Makefile,v 1.2 2005/03/29 03:32:18 mbm Exp $
++/* fifoaddr */
++#define	D64_FA_OFF_MASK		0xffff		/* offset */
++#define	D64_FA_SEL_MASK		0xf0000		/* select */
++#define	D64_FA_SEL_SHIFT	16
++#define	D64_FA_SEL_XDD		0x00000		/* transmit dma data */
++#define	D64_FA_SEL_XDP		0x10000		/* transmit dma pointers */
++#define	D64_FA_SEL_RDD		0x40000		/* receive dma data */
++#define	D64_FA_SEL_RDP		0x50000		/* receive dma pointers */
++#define	D64_FA_SEL_XFD		0x80000		/* transmit fifo data */
++#define	D64_FA_SEL_XFP		0x90000		/* transmit fifo pointers */
++#define	D64_FA_SEL_RFD		0xc0000		/* receive fifo data */
++#define	D64_FA_SEL_RFP		0xd0000		/* receive fifo pointers */
++#define	D64_FA_SEL_RSD		0xe0000		/* receive frame status data */
++#define	D64_FA_SEL_RSP		0xf0000		/* receive frame status pointers */
 +
-+EXTRA_CFLAGS += -I$(TOPDIR)/arch/mips/bcm947xx/include
++/* descriptor control flags 1 */
++#define	D64_CTRL1_EOT		((uint32)1 << 28)	/* end of descriptor table */
++#define	D64_CTRL1_IOC		((uint32)1 << 29)	/* interrupt on completion */
++#define	D64_CTRL1_EOF		((uint32)1 << 30)	/* end of frame */
++#define	D64_CTRL1_SOF		((uint32)1 << 31)	/* start of frame */
 +
-+O_TARGET	:= wl.o
++/* descriptor control flags 2 */
++#define	D64_CTRL2_BC_MASK	0x00007fff	/* buffer byte count mask */
++#define	D64_CTRL2_AE		0x00110000	/* address extension bits */
++#define	D64_CTRL2_AE_SHIFT	16
 +
-+obj-y		:= apsta_aeskeywrap.o apsta_aes.o apsta_bcmwpa.o apsta_d11ucode.o
-+obj-y		+= apsta_hmac.o apsta_md5.o apsta_passhash.o apsta_prf.o apsta_rc4.o
-+obj-y		+= apsta_rijndael-alg-fst.o apsta_sha1.o apsta_tkhash.o apsta_wlc_led.o
-+obj-y		+= apsta_wlc_phy.o apsta_wlc_rate.o apsta_wlc_security.o 
-+obj-y		+= apsta_wlc_sup.o apsta_wlc_wet.o apsta_wl_linux.o apsta_wlc.o
++/* control flags in the range [27:20] are core-specific and not defined here */
++#define	D64_CTRL_CORE_MASK	0x0ff00000
 +
-+obj-m		:= $(O_TARGET)
 +
-+include $(TOPDIR)/Rules.make
-diff -Nur linux-2.4.32/drivers/parport/Config.in linux-2.4.32-brcm/drivers/parport/Config.in
---- linux-2.4.32/drivers/parport/Config.in	2004-02-18 14:36:31.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/parport/Config.in	2005-12-16 23:39:11.364863000 +0100
++#endif	/* _sbhnddma_h_ */
+diff -Naur linux.old/drivers/parport/Config.in linux.dev/drivers/parport/Config.in
+--- linux.old/drivers/parport/Config.in	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/drivers/parport/Config.in	2006-04-06 15:34:15.000000000 +0200
 @@ -11,6 +11,7 @@
  tristate 'Parallel port support' CONFIG_PARPORT
  if [ "$CONFIG_PARPORT" != "n" ]; then
@@ -17863,9 +20224,9 @@ diff -Nur linux-2.4.32/drivers/parport/Config.in linux-2.4.32-brcm/drivers/parpo
     if [ "$CONFIG_PARPORT_PC" != "n" -a "$CONFIG_SERIAL" != "n" ]; then
        if [ "$CONFIG_SERIAL" = "m" ]; then
           define_tristate CONFIG_PARPORT_PC_CML1 m
-diff -Nur linux-2.4.32/drivers/parport/Makefile linux-2.4.32-brcm/drivers/parport/Makefile
---- linux-2.4.32/drivers/parport/Makefile	2004-08-08 01:26:05.000000000 +0200
-+++ linux-2.4.32-brcm/drivers/parport/Makefile	2005-12-16 23:39:11.364863000 +0100
+diff -Naur linux.old/drivers/parport/Makefile linux.dev/drivers/parport/Makefile
+--- linux.old/drivers/parport/Makefile	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/drivers/parport/Makefile	2006-04-06 15:34:15.000000000 +0200
 @@ -22,6 +22,7 @@
  
  obj-$(CONFIG_PARPORT)		+= parport.o
@@ -17874,9 +20235,9 @@ diff -Nur linux-2.4.32/drivers/parport/Makefile linux-2.4.32-brcm/drivers/parpor
  obj-$(CONFIG_PARPORT_PC_PCMCIA)	+= parport_cs.o
  obj-$(CONFIG_PARPORT_AMIGA)	+= parport_amiga.o
  obj-$(CONFIG_PARPORT_MFC3)	+= parport_mfc3.o
-diff -Nur linux-2.4.32/drivers/parport/parport_splink.c linux-2.4.32-brcm/drivers/parport/parport_splink.c
---- linux-2.4.32/drivers/parport/parport_splink.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/parport/parport_splink.c	2005-12-16 23:39:11.364863000 +0100
+diff -Naur linux.old/drivers/parport/parport_splink.c linux.dev/drivers/parport/parport_splink.c
+--- linux.old/drivers/parport/parport_splink.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/parport/parport_splink.c	2006-04-06 15:34:15.000000000 +0200
 @@ -0,0 +1,345 @@
 +/* Low-level parallel port routines for the ASUS WL-500g built-in port
 + *
@@ -18223,9 +20584,32 @@ diff -Nur linux-2.4.32/drivers/parport/parport_splink.c linux-2.4.32-brcm/driver
 +module_init(parport_splink_init)
 +module_exit(parport_splink_cleanup)
 +
-diff -Nur linux-2.4.32/drivers/pcmcia/bcm4710_generic.c linux-2.4.32-brcm/drivers/pcmcia/bcm4710_generic.c
---- linux-2.4.32/drivers/pcmcia/bcm4710_generic.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/pcmcia/bcm4710_generic.c	2005-12-16 23:39:11.368863250 +0100
+diff -Naur linux.old/drivers/pcmcia/Makefile linux.dev/drivers/pcmcia/Makefile
+--- linux.old/drivers/pcmcia/Makefile	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/drivers/pcmcia/Makefile	2006-04-06 15:34:15.000000000 +0200
+@@ -74,6 +74,10 @@
+ au1000_ss-objs-$(CONFIG_MIPS_HYDROGEN3)		+= au1000_hydrogen3.o
+ au1000_ss-objs-$(CONFIG_MIPS_XXS1500) 		+= au1000_xxs1500.o
+ 
++obj-$(CONFIG_PCMCIA_BCM4710)	+= bcm4710_ss.o
++bcm4710_ss-objs					:= bcm4710_generic.o
++bcm4710_ss-objs					+= bcm4710_pcmcia.o
++
+ obj-$(CONFIG_PCMCIA_SA1100)	+= sa1100_cs.o
+ obj-$(CONFIG_PCMCIA_M8XX)	+= m8xx_pcmcia.o
+ obj-$(CONFIG_PCMCIA_SIBYTE)	+= sibyte_generic.o
+@@ -112,5 +116,8 @@
+ au1x00_ss.o: $(au1000_ss-objs-y)
+ 	$(LD) -r -o $@ $(au1000_ss-objs-y)
+ 
++bcm4710_ss.o: $(bcm4710_ss-objs)
++	$(LD) -r -o $@ $(bcm4710_ss-objs)
++
+ yenta_socket.o: $(yenta_socket-objs)
+ 	$(LD) $(LD_RFLAG) -r -o $@ $(yenta_socket-objs)
+diff -Naur linux.old/drivers/pcmcia/bcm4710_generic.c linux.dev/drivers/pcmcia/bcm4710_generic.c
+--- linux.old/drivers/pcmcia/bcm4710_generic.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/pcmcia/bcm4710_generic.c	2006-04-06 15:34:15.000000000 +0200
 @@ -0,0 +1,912 @@
 +/*
 + *
@@ -19139,9 +21523,9 @@ diff -Nur linux-2.4.32/drivers/pcmcia/bcm4710_generic.c linux-2.4.32-brcm/driver
 +
 +
 +#endif  /* defined(CONFIG_PROC_FS) */
-diff -Nur linux-2.4.32/drivers/pcmcia/bcm4710_pcmcia.c linux-2.4.32-brcm/drivers/pcmcia/bcm4710_pcmcia.c
---- linux-2.4.32/drivers/pcmcia/bcm4710_pcmcia.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/pcmcia/bcm4710_pcmcia.c	2005-12-16 23:39:11.368863250 +0100
+diff -Naur linux.old/drivers/pcmcia/bcm4710_pcmcia.c linux.dev/drivers/pcmcia/bcm4710_pcmcia.c
+--- linux.old/drivers/pcmcia/bcm4710_pcmcia.c	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/pcmcia/bcm4710_pcmcia.c	2006-04-06 15:34:15.000000000 +0200
 @@ -0,0 +1,266 @@
 +/*
 + * BCM4710 specific pcmcia routines.
@@ -19409,9 +21793,9 @@ diff -Nur linux-2.4.32/drivers/pcmcia/bcm4710_pcmcia.c linux-2.4.32-brcm/drivers
 +	bcm4710_pcmcia_configure_socket
 +};
 +
-diff -Nur linux-2.4.32/drivers/pcmcia/bcm4710pcmcia.h linux-2.4.32-brcm/drivers/pcmcia/bcm4710pcmcia.h
---- linux-2.4.32/drivers/pcmcia/bcm4710pcmcia.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/pcmcia/bcm4710pcmcia.h	2005-12-16 23:39:11.368863250 +0100
+diff -Naur linux.old/drivers/pcmcia/bcm4710pcmcia.h linux.dev/drivers/pcmcia/bcm4710pcmcia.h
+--- linux.old/drivers/pcmcia/bcm4710pcmcia.h	1970-01-01 01:00:00.000000000 +0100
++++ linux.dev/drivers/pcmcia/bcm4710pcmcia.h	2006-04-06 15:34:15.000000000 +0200
 @@ -0,0 +1,118 @@
 +/*
 + *
@@ -19531,32 +21915,9 @@ diff -Nur linux-2.4.32/drivers/pcmcia/bcm4710pcmcia.h linux-2.4.32-brcm/drivers/
 +extern struct pcmcia_low_level bcm4710_pcmcia_ops;
 +
 +#endif  /* !defined(_BCM4710PCMCIA_H) */
-diff -Nur linux-2.4.32/drivers/pcmcia/Makefile linux-2.4.32-brcm/drivers/pcmcia/Makefile
---- linux-2.4.32/drivers/pcmcia/Makefile	2004-02-18 14:36:31.000000000 +0100
-+++ linux-2.4.32-brcm/drivers/pcmcia/Makefile	2005-12-16 23:39:11.364863000 +0100
-@@ -65,6 +65,10 @@
- au1000_ss-objs-$(CONFIG_PCMCIA_DB1X00)		+= au1000_db1x00.o
- au1000_ss-objs-$(CONFIG_PCMCIA_XXS1500) 	+= au1000_xxs1500.o
- 
-+obj-$(CONFIG_PCMCIA_BCM4710)	+= bcm4710_ss.o
-+bcm4710_ss-objs					:= bcm4710_generic.o
-+bcm4710_ss-objs					+= bcm4710_pcmcia.o
-+
- obj-$(CONFIG_PCMCIA_SA1100)	+= sa1100_cs.o
- obj-$(CONFIG_PCMCIA_M8XX)	+= m8xx_pcmcia.o
- obj-$(CONFIG_PCMCIA_SIBYTE)	+= sibyte_generic.o
-@@ -102,5 +106,8 @@
- au1x00_ss.o: $(au1000_ss-objs-y)
- 	$(LD) -r -o $@ $(au1000_ss-objs-y)
- 
-+bcm4710_ss.o: $(bcm4710_ss-objs)
-+	$(LD) -r -o $@ $(bcm4710_ss-objs)
-+
- yenta_socket.o: $(yenta_socket-objs)
- 	$(LD) $(LD_RFLAG) -r -o $@ $(yenta_socket-objs)
-diff -Nur linux-2.4.32/include/asm-mips/bootinfo.h linux-2.4.32-brcm/include/asm-mips/bootinfo.h
---- linux-2.4.32/include/asm-mips/bootinfo.h	2004-02-18 14:36:32.000000000 +0100
-+++ linux-2.4.32-brcm/include/asm-mips/bootinfo.h	2005-12-16 23:39:11.400865250 +0100
+diff -Naur linux.old/include/asm-mips/bootinfo.h linux.dev/include/asm-mips/bootinfo.h
+--- linux.old/include/asm-mips/bootinfo.h	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/include/asm-mips/bootinfo.h	2006-04-06 15:34:15.000000000 +0200
 @@ -37,6 +37,7 @@
  #define MACH_GROUP_HP_LJ       20 /* Hewlett Packard LaserJet               */
  #define MACH_GROUP_LASAT       21
@@ -19565,7 +21926,7 @@ diff -Nur linux-2.4.32/include/asm-mips/bootinfo.h linux-2.4.32-brcm/include/asm
  
  /*
   * Valid machtype values for group unknown (low order halfword of mips_machtype)
-@@ -194,6 +195,15 @@
+@@ -197,6 +198,15 @@
  #define MACH_TANBAC_TB0229	7	/* TANBAC TB0229 (VR4131DIMM) */
  
  /*
@@ -19581,9 +21942,9 @@ diff -Nur linux-2.4.32/include/asm-mips/bootinfo.h linux-2.4.32-brcm/include/asm
   * Valid machtype for group TITAN
   */
  #define	MACH_TITAN_YOSEMITE	1 	/* PMC-Sierra Yosemite */
-diff -Nur linux-2.4.32/include/asm-mips/cpu.h linux-2.4.32-brcm/include/asm-mips/cpu.h
---- linux-2.4.32/include/asm-mips/cpu.h	2005-01-19 15:10:11.000000000 +0100
-+++ linux-2.4.32-brcm/include/asm-mips/cpu.h	2005-12-16 23:39:11.412866000 +0100
+diff -Naur linux.old/include/asm-mips/cpu.h linux.dev/include/asm-mips/cpu.h
+--- linux.old/include/asm-mips/cpu.h	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/include/asm-mips/cpu.h	2006-04-06 15:34:15.000000000 +0200
 @@ -22,6 +22,11 @@
     spec.
  */
@@ -19632,10 +21993,10 @@ diff -Nur linux-2.4.32/include/asm-mips/cpu.h linux-2.4.32-brcm/include/asm-mips
  
  /*
   * ISA Level encodings
-diff -Nur linux-2.4.32/include/asm-mips/r4kcache.h linux-2.4.32-brcm/include/asm-mips/r4kcache.h
---- linux-2.4.32/include/asm-mips/r4kcache.h	2004-02-18 14:36:32.000000000 +0100
-+++ linux-2.4.32-brcm/include/asm-mips/r4kcache.h	2005-12-16 23:39:11.416866250 +0100
-@@ -567,4 +567,17 @@
+diff -Naur linux.old/include/asm-mips/r4kcache.h linux.dev/include/asm-mips/r4kcache.h
+--- linux.old/include/asm-mips/r4kcache.h	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/include/asm-mips/r4kcache.h	2006-04-06 15:34:15.000000000 +0200
+@@ -658,4 +658,17 @@
  			cache128_unroll32(addr|ws,Index_Writeback_Inv_SD);
  }
  
@@ -19653,9 +22014,9 @@ diff -Nur linux-2.4.32/include/asm-mips/r4kcache.h linux-2.4.32-brcm/include/asm
 +}      
 +
  #endif /* __ASM_R4KCACHE_H */
-diff -Nur linux-2.4.32/include/asm-mips/serial.h linux-2.4.32-brcm/include/asm-mips/serial.h
---- linux-2.4.32/include/asm-mips/serial.h	2005-01-19 15:10:12.000000000 +0100
-+++ linux-2.4.32-brcm/include/asm-mips/serial.h	2005-12-16 23:39:11.428867000 +0100
+diff -Naur linux.old/include/asm-mips/serial.h linux.dev/include/asm-mips/serial.h
+--- linux.old/include/asm-mips/serial.h	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/include/asm-mips/serial.h	2006-04-06 15:34:15.000000000 +0200
 @@ -223,6 +223,13 @@
  #define TXX927_SERIAL_PORT_DEFNS
  #endif
@@ -19678,10 +22039,10 @@ diff -Nur linux-2.4.32/include/asm-mips/serial.h linux-2.4.32-brcm/include/asm-m
  	COBALT_SERIAL_PORT_DEFNS		\
  	DDB5477_SERIAL_PORT_DEFNS		\
  	EV96100_SERIAL_PORT_DEFNS		\
-diff -Nur linux-2.4.32/init/do_mounts.c linux-2.4.32-brcm/init/do_mounts.c
---- linux-2.4.32/init/do_mounts.c	2003-11-28 19:26:21.000000000 +0100
-+++ linux-2.4.32-brcm/init/do_mounts.c	2005-12-16 23:39:11.504871750 +0100
-@@ -253,7 +253,13 @@
+diff -Naur linux.old/init/do_mounts.c linux.dev/init/do_mounts.c
+--- linux.old/init/do_mounts.c	2006-04-06 15:38:09.000000000 +0200
++++ linux.dev/init/do_mounts.c	2006-04-06 15:34:15.000000000 +0200
+@@ -254,7 +254,13 @@
  	{ "ftlb", 0x2c08 },
  	{ "ftlc", 0x2c10 },
  	{ "ftld", 0x2c18 },