bk://gkernel.bkbits.net/netdev-2.6
jgarzik@redhat.com|ChangeSet|20040327023716|49537 jgarzik

# This is a BitKeeper generated diff -Nru style patch.
#
# ChangeSet
#   2004/03/26 21:32:26-05:00 romieu@fr.zoreil.com 
#   [netdrvr sis190] more RX path work
#   
#   - sis190_rx_interrupt converted to classical Rx skb handling;
#   - rx_copybreak is new.
#   
#   Some similarity with the r8169 driver cannot be excluded.
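#
#   A minimal, hypothetical sketch of a classical Rx-copybreak path (the
#   sis190 hunks are not part of this diff; rx_copybreak, rx_skb and
#   pkt_size are illustrative names):
#
#       struct sk_buff *skb;
#
#       if (pkt_size < rx_copybreak &&
#           (skb = dev_alloc_skb(pkt_size + 2)) != NULL) {
#               skb_reserve(skb, 2);    /* align the IP header */
#               memcpy(skb_put(skb, pkt_size), rx_skb->data, pkt_size);
#               /* rx_skb stays mapped and is handed back to the NIC */
#       } else {
#               skb = rx_skb;           /* give the full-sized skb away */
#               skb_put(skb, pkt_size);
#               /* the ring slot will be refilled with a fresh skb */
#       }
#       skb->protocol = eth_type_trans(skb, dev);
#       netif_rx(skb);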
# 
# drivers/net/sis190.c
#   2004/03/26 20:42:39-05:00 romieu@fr.zoreil.com +78 -38
#   2.6.5-rc2 - sis190 update
# 
# ChangeSet
#   2004/03/26 21:32:19-05:00 romieu@fr.zoreil.com 
#   [netdrvr sis190] don't use one huge buffer for all RX skb's
#   
#   Replace the giant receive buffer with individually allocated skbs.
# 
# drivers/net/sis190.c
#   2004/03/26 20:37:47-05:00 romieu@fr.zoreil.com +60 -26
#   2.6.5-rc2 - sis190 update
# 
# ChangeSet
#   2004/03/26 21:32:11-05:00 romieu@fr.zoreil.com 
#   [netdrvr sis190] add dirty_rx to private structure
#   
#   Add dirty_rx (unused so far).
# 
# drivers/net/sis190.c
#   2004/03/26 20:37:25-05:00 romieu@fr.zoreil.com +5 -4
#   2.6.5-rc2 - sis190 update
# 
# ChangeSet
#   2004/03/26 21:32:04-05:00 romieu@fr.zoreil.com 
#   [netdrvr sis190] separate out RX skb alloc, fill
#   
#   Still no functional change. See the r8169 driver for details.
# 
# drivers/net/sis190.c
#   2004/03/26 20:37:12-05:00 romieu@fr.zoreil.com +38 -13
#   2.6.5-rc2 - sis190 update
# 
# ChangeSet
#   2004/03/26 21:31:57-05:00 romieu@fr.zoreil.com 
#   [netdrvr sis190] add helpers
#   
#   New helpers (shamelessly stolen from r8169 driver):
#   - sis190_mark_as_last_descriptor;
#   - sis190_give_to_asic.
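#
#   Hypothetical sketch of what such helpers typically look like in this
#   style of driver (struct RxDesc, OWNbit and EORbit are assumed names;
#   the sis190 hunks themselves are not part of this diff):
#
#       static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
#       {
#               /* hand the descriptor (back) to the NIC */
#               desc->status = cpu_to_le32(OWNbit | rx_buf_sz);
#       }
#
#       static inline void sis190_mark_as_last_descriptor(struct RxDesc *desc)
#       {
#               /* tell the NIC to wrap after this entry */
#               desc->status |= cpu_to_le32(EORbit);
#       }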
# 
# drivers/net/sis190.c
#   2004/03/26 20:36:46-05:00 romieu@fr.zoreil.com +14 -6
#   2.6.5-rc2 - sis190 update
# 
# ChangeSet
#   2004/03/26 21:31:50-05:00 romieu@fr.zoreil.com 
#   [netdrvr sis190] sis190_open() fixes/updates
#   
#   - make sis190_open() look like r8169_open() as they do the same thing;
#   - ready sis190_init_ring for incoming DMA api changes;
#   - trade a "for" loop for a single, idiomatic memset().
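#
#   I.e., roughly (the array and size names are illustrative):
#
#       /* instead of a "for" loop clearing each entry by hand */
#       memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));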
# 
# drivers/net/sis190.c
#   2004/03/26 17:47:11-05:00 romieu@fr.zoreil.com +29 -28
#   2.6.5-rc2 - sis190 update
# 
# ChangeSet
#   2004/03/26 21:31:42-05:00 romieu@fr.zoreil.com 
#   [netdrvr sis190] add pci-disable-device
#   
#   Balance the call to pci_enable_device() in SiS190_init_one() with a call
#   to pci_disable_device() in SiS190_remove_one().
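#
#   The usual pattern, sketched here with only the function names taken from
#   the description above (the body comments stand in for the real code):
#
#       static int SiS190_init_one(struct pci_dev *pdev,
#                                  const struct pci_device_id *ent)
#       {
#               int rc = pci_enable_device(pdev);
#
#               if (rc < 0)
#                       return rc;
#               /* ... resource setup, netdev registration ... */
#               return 0;
#       }
#
#       static void SiS190_remove_one(struct pci_dev *pdev)
#       {
#               /* ... unregister and free the netdev ... */
#               pci_disable_device(pdev);  /* balances pci_enable_device() */
#       }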
# 
# drivers/net/sis190.c
#   2004/03/26 17:16:58-05:00 romieu@fr.zoreil.com +15 -9
#   2.6.5-rc2 - sis190 update
# 
# ChangeSet
#   2004/03/26 21:31:35-05:00 romieu@fr.zoreil.com 
#   [netdrvr sis190] fix endianness issues
#   
#   Use le32_to_cpu()/cpu_to_le32() in the code that handles the different
#   components of the Rx descriptors (PSize/status/buf_addr/buf_Len).
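#
#   For instance (a sketch; OWNbit and the exact field layout are assumed):
#
#       if (le32_to_cpu(desc->status) & OWNbit)
#               break;                          /* still owned by the NIC */
#       pkt_size = le32_to_cpu(desc->PSize) & 0xffff;   /* mask is illustrative */
#       desc->buf_addr = cpu_to_le32(mapping);  /* dma_addr_t -> descriptor */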
# 
# drivers/net/sis190.c
#   2004/03/26 17:13:10-05:00 romieu@fr.zoreil.com +21 -24
#   2.6.5-rc2 - sis190 update
# 
# ChangeSet
#   2004/03/25 23:52:21-05:00 romieu@fr.zoreil.com 
#   [netdrvr epic100] napi fixes
#   
#   Multiple invocations of __netif_rx_schedule() in epic_interrupt() while
#   epic_poll() loops over __netif_rx_complete() lead to a serious device
#   refcount leak.
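#
#   The usual way to avoid that is to guard the schedule with
#   netif_rx_schedule_prep(), sketched below (not the exact epic100 hunk):
#
#       /* in the interrupt handler */
#       if (status & rx_events) {
#               if (netif_rx_schedule_prep(dev)) {
#                       /* mask the Rx interrupt sources here */
#                       __netif_rx_schedule(dev);
#               }
#       }
#
#       /* in dev->poll(), once the ring is drained */
#       netif_rx_complete(dev);
#       /* re-enable the Rx interrupt sources here */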
# 
# drivers/net/epic100.c
#   2004/03/25 23:52:16-05:00 romieu@fr.zoreil.com +18 -15
#   [netdrvr epic100] napi fixes
#   
#   Multiple invocations of __netif_rx_schedule() in epic_interrupt() while
#   epic_poll() loops over __netif_rx_complete() lead to a serious device
#   refcount leak.
# 
# ChangeSet
#   2004/03/22 19:07:18-05:00 romieu@fr.zoreil.com 
#   [netdrvr epic100] napi 3/3 - transmit path
# 
# drivers/net/epic100.c
#   2004/03/22 18:18:40-05:00 romieu@fr.zoreil.com +9 -11
#   2.6.5-rc2 - epic100 napi
# 
# ChangeSet
#   2004/03/22 19:07:11-05:00 romieu@fr.zoreil.com 
#   [netdrvr epic100] napi 2/3 - receive path
# 
# drivers/net/epic100.c
#   2004/03/22 18:18:33-05:00 romieu@fr.zoreil.com +116 -21
#   2.6.5-rc2 - epic100 napi
# 
# ChangeSet
#   2004/03/22 19:07:03-05:00 romieu@fr.zoreil.com 
#   [netdrvr epic100] napi 1/3 - just shuffle some code around
#   
#   Isolate the classical TX part of epic_interrupt. Innocent code shuffling.
# 
# drivers/net/epic100.c
#   2004/03/22 16:53:18-05:00 romieu@fr.zoreil.com +76 -61
#   2.6.5-rc2 - epic100 napi
# 
# ChangeSet
#   2004/03/22 19:06:56-05:00 romieu@fr.zoreil.com 
#   [netdrvr epic100] minor cleanups
#   
#   - extra pci_disable_device() to balance invocation of pci_enable_device()
#     in epic_init_one() (-> error path + epic_remove_one());
#   - lazy return status in epic_init_one(), tsss...;
#   - memory dedicated to Rx descriptors was not freed after failure of
#     register_netdev() in epic_init_one();
#   - use of epic_pause() in epic_close() offers a small window for a late
#     interrupt just before the final free_irq(). Let's close the window to
#     avoid two epic_rx() threads racing with each other.
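#
#   A generic sketch of how such a window is usually closed (not the exact
#   epic100 change): mask the chip, release the irq so no handler can still
#   be running, and only then tear the rings down. INTMASK is the epic100
#   interrupt-mask register used elsewhere in this patch:
#
#       netif_stop_queue(dev);
#       outl(0x00000000, ioaddr + INTMASK);     /* mask chip interrupts */
#       free_irq(dev->irq, dev);                /* no handler runs past this */
#       /* now it is safe to free the Rx/Tx rings */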
# 
# drivers/net/epic100.c
#   2004/03/22 16:53:16-05:00 romieu@fr.zoreil.com +40 -19
#   2.6.5-rc2 - epic100 fixup
# 
# ChangeSet
#   2004/03/19 16:47:27-08:00 akpm@bix.(none) 
#   Merge bix.(none):/usr/src/bk25 into bix.(none):/usr/src/bk-netdev
# 
# net/core/dev.c
#   2004/03/19 16:47:25-08:00 akpm@bix.(none) +0 -0
#   Auto merged
# 
# ChangeSet
#   2004/03/19 10:06:23-08:00 akpm@bix.(none) 
#   Merge bix.(none):/usr/src/bk25 into bix.(none):/usr/src/bk-netdev
# 
# net/core/dev.c
#   2004/03/19 10:06:20-08:00 akpm@bix.(none) +0 -0
#   Auto merged
# 
# ChangeSet
#   2004/03/15 10:55:58-08:00 akpm@bix.(none) 
#   Merge bix.(none):/usr/src/bk25 into bix.(none):/usr/src/bk-netdev
# 
# drivers/net/pcnet32.c
#   2004/03/15 10:55:45-08:00 akpm@bix.(none) +0 -0
#   Auto merged
# 
# ChangeSet
#   2004/03/14 10:52:32-08:00 akpm@bix.(none) 
#   Merge bix.(none):/usr/src/bk25 into bix.(none):/usr/src/bk-netdev
# 
# drivers/net/tg3.c
#   2004/03/14 10:52:15-08:00 akpm@bix.(none) +0 -0
#   Auto merged
# 
# drivers/net/pcnet32.c
#   2004/03/14 10:52:15-08:00 akpm@bix.(none) +0 -0
#   Auto merged
# 
# ChangeSet
#   2004/03/13 23:55:15-08:00 akpm@bix.(none) 
#   Merge bix.(none):/usr/src/bk25 into bix.(none):/usr/src/bk-netdev
# 
# drivers/net/pcnet32.c
#   2004/03/13 23:55:00-08:00 akpm@bix.(none) +0 -0
#   Auto merged
# 
# ChangeSet
#   2004/03/10 20:56:31-08:00 akpm@mnm.(none) 
#   Merge bk://gkernel.bkbits.net/netdev-2.6
#   into mnm.(none):/usr/src/bk-netdev
# 
# net/core/dev.c
#   2004/03/10 20:56:23-08:00 akpm@mnm.(none) +0 -0
#   Auto merged
# 
# drivers/net/tg3.c
#   2004/03/10 20:56:23-08:00 akpm@mnm.(none) +0 -0
#   Auto merged
# 
# drivers/net/pcnet32.c
#   2004/03/10 20:56:23-08:00 akpm@mnm.(none) +0 -0
#   Auto merged
# 
# ChangeSet
#   2004/03/10 20:53:01-08:00 akpm@mnm.(none) 
#   Merge mnm.(none):/usr/src/bk25 into mnm.(none):/usr/src/bk-netdev
# 
# drivers/net/pcnet32.c
#   2004/03/10 20:52:54-08:00 akpm@mnm.(none) +0 -0
#   Auto merged
# 
# ChangeSet
#   2004/03/09 13:23:36-08:00 akpm@mnm.(none) 
#   Merge mnm.(none):/usr/src/bk25 into mnm.(none):/usr/src/bk-netdev
# 
# drivers/net/pcnet32.c
#   2004/03/09 13:23:30-08:00 akpm@mnm.(none) +0 -0
#   Auto merged
# 
# ChangeSet
#   2004/03/08 22:33:46-08:00 akpm@mnm.(none) 
#   Merge mnm.(none):/usr/src/bk25 into mnm.(none):/usr/src/bk-netdev
# 
# drivers/net/tg3.c
#   2004/03/08 22:33:39-08:00 akpm@mnm.(none) +0 -0
#   Auto merged
# 
# ChangeSet
#   2004/03/02 05:02:02-08:00 akpm@mnm.(none) 
#   Merge bk://gkernel.bkbits.net/netdev-2.6
#   into mnm.(none):/usr/src/bk-netdev
# 
# net/core/dev.c
#   2004/03/02 05:01:52-08:00 akpm@mnm.(none) +0 -0
#   Auto merged
# 
# ChangeSet
#   2004/03/02 02:47:41-05:00 jgarzik@redhat.com 
#   Manually merge with upstream.
# 
# drivers/net/r8169.c
#   2004/03/02 02:47:37-05:00 jgarzik@redhat.com +0 -8
#   Manually merge with upstream.
# 
# ChangeSet
#   2004/02/29 13:29:08-05:00 hch@lst.de 
#   [PATCH] convert acenic to pci_driver API
# 
# drivers/net/acenic.c
#   2004/02/27 17:20:44-05:00 hch@lst.de +257 -313
#   kill ancient compat cruft from acenic
# 
# ChangeSet
#   2004/02/29 13:29:01-05:00 hch@lst.de 
#   [PATCH] kill ancient compat cruft from acenic
#   
#   Kills lots of really old cruft and adds a little cruft to actually
#   make the driver work with recent 2.4 again.
# 
# drivers/net/acenic.c
#   2004/02/27 10:04:04-05:00 hch@lst.de +4 -225
#   kill ancient compat cruft from acenic
# 
# ChangeSet
#   2004/02/29 13:28:18-05:00 jgarzik@redhat.com 
#   Remove unused compatibility-defines include wan/lmc/lmc_ver.h.
#   
#   Noticed by Adrian Bunk.
# 
# BitKeeper/deleted/.del-lmc_ver.h~31d4d105f4ad3f
#   2004/02/29 13:25:38-05:00 jgarzik@redhat.com +0 -0
#   Delete: drivers/net/wan/lmc/lmc_ver.h
# 
# ChangeSet
#   2004/02/18 18:54:54-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] Rx wrap bug:
#   - rtl8169_rx_interrupt() can wrap and process an Rx descriptor that it has
#     invalidated a few iterations before. The patch limits the number of
#     allowed descriptors between two invocations of the Rx refill function;
#   - rtl8169_rx_interrupt() now looks similar to rtl8169_tx_interrupt to
#     highlight the issue.
#   
# 
# drivers/net/r8169.c
#   2004/02/18 18:54:49-05:00 romieu@fr.zoreil.com +16 -9
#   [netdrvr r8169] Rx wrap bug:
#   - rtl8169_rx_interrupt() can wrap and process an Rx descriptor that it has
#     invalidated a few iterations before. The patch limits the number of
#     allowed descriptors between two invocations of the Rx refill function;
#   - rtl8169_rx_interrupt() now looks similar to rtl8169_tx_interrupt to
#     highlight the issue.
#   
# 
# ChangeSet
#   2004/01/24 20:43:33-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] fix TX race
#   
#   - possible tx descriptor index overflow (assume tp->dirty_tx = NUM_TX_DESC/2,
#     tp->cur_tx = NUM_TX_DESC - 1 and watch TxDescArray for example);
#   - the status of the wrong descriptor is checked.
#   
#   NB: the bug will not necessarily be noticed when tx_left == 1.
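#
#   Sketch of wrap-safe ring accounting (tp->dirty_tx, tp->cur_tx,
#   TxDescArray and NUM_TX_DESC appear above; OWNbit is assumed):
#
#       unsigned int dirty_tx = tp->dirty_tx;
#       unsigned int tx_left = tp->cur_tx - dirty_tx;
#
#       while (tx_left > 0) {
#               unsigned int entry = dirty_tx % NUM_TX_DESC;
#               struct TxDesc *desc = tp->TxDescArray + entry;
#
#               if (le32_to_cpu(desc->status) & OWNbit)
#                       break;          /* NIC is not done with this one */
#               /* ... unmap and free the skb for "entry" ... */
#               dirty_tx++;
#               tx_left--;
#       }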
# 
# drivers/net/r8169.c
#   2004/01/24 15:04:36-05:00 romieu@fr.zoreil.com +7 -7
#   [netdrvr r8169] fix TX race
# 
# ChangeSet
#   2004/01/13 16:43:24-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] fix phy initialization loop init
# 
# drivers/net/r8169.c
#   2004/01/13 16:43:19-05:00 romieu@fr.zoreil.com +1 -1
#   [netdrvr r8169] fix phy initialization loop init
# 
# ChangeSet
#   2004/01/12 17:19:54-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] fix rx counter masking bug
# 
# drivers/net/r8169.c
#   2004/01/12 17:01:59-05:00 romieu@fr.zoreil.com +1 -1
#   [netdrvr r8169] fix rx counter masking bug
# 
# ChangeSet
#   2004/01/10 16:47:18-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] fix oops by removing __devinitdata marker
# 
# drivers/net/r8169.c
#   2004/01/10 11:47:13-05:00 romieu@fr.zoreil.com +1 -1
#   [netdrvr r8169] fix oops by removing __devinitdata marker
# 
# ChangeSet
#   2004/01/10 16:46:10-05:00 romieu@fr.zoreil.com 
#   [PATCH] 2.6.1-rc1-mm1 - typo of death in the r8169 driver
#   
#     silly bug in the r8169 driver.
# 
# drivers/net/r8169.c
#   2004/01/10 11:46:04-05:00 romieu@fr.zoreil.com +1 -1
#   [PATCH] 2.6.1-rc1-mm1 - typo of death in the r8169 driver
#   
#     silly bug in the r8169 driver.
# 
# ChangeSet
#   2004/01/10 16:11:57-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] Stats fix (Fernando Alencar Marótica <famarost@unimep.br>).
# 
# drivers/net/r8169.c
#   2004/01/10 11:11:52-05:00 romieu@fr.zoreil.com +15 -0
#   [netdrvr r8169] Stats fix (Fernando Alencar Marótica <famarost@unimep.br>).
# 
# ChangeSet
#   2004/01/10 16:11:40-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] Endianness update (original idea from Alexandra N. Kossovsky):
#   - descriptor status (bitfields enumerated as _DescStatusBit);
#   - address of buffers stored in Rx/Tx descriptors.
# 
# drivers/net/r8169.c
#   2004/01/10 11:11:34-05:00 romieu@fr.zoreil.com +16 -15
#   [netdrvr r8169] Endianness update (original idea from Alexandra N. Kossovsky):
#   - descriptor status (bitfields enumerated as _DescStatusBit);
#   - address of buffers stored in Rx/Tx descriptors.
# 
# ChangeSet
#   2004/01/10 16:01:37-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] fix RX
#   
#   Brown paper bag time: the Rx descriptors are contiguous and EORbit only
#   marks the last descriptor in the array. OWNbit implicitly marks the end
#   of the Rx descriptors segment which is owned by the nic.
# 
# drivers/net/r8169.c
#   2004/01/10 11:01:32-05:00 romieu@fr.zoreil.com +6 -14
#   [netdrvr r8169] fix RX
#   
#   Brown paper bag time: the Rx descriptors are contiguous and EORbit only
#   marks the last descriptor in the array. OWNbit implicitly marks the end
#   of the Rx descriptors segment which is owned by the nic.
# 
# ChangeSet
#   2004/01/10 16:01:28-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] Suspend/resume code (Fernando Alencar Marótica).
# 
# drivers/net/r8169.c
#   2004/01/10 11:01:23-05:00 romieu@fr.zoreil.com +67 -5
#   [netdrvr r8169] Suspend/resume code (Fernando Alencar Marótica).
# 
# ChangeSet
#   2004/01/10 16:01:20-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] Modification of the interrupt mask (RealTek).
# 
# drivers/net/r8169.c
#   2004/01/10 11:01:15-05:00 romieu@fr.zoreil.com +2 -5
#   [netdrvr r8169] Modification of the interrupt mask (RealTek).
# 
# ChangeSet
#   2004/01/10 16:01:11-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] Driver forgot to update the transmitted bytes counter.
#   Originally done in rtl8169_start_xmit() by Realtek.
# 
# drivers/net/r8169.c
#   2004/01/10 11:01:06-05:00 romieu@fr.zoreil.com +4 -1
#   [netdrvr r8169] Driver forgot to update the transmitted bytes counter.
#   Originally done in rtl8169_start_xmit() by Realtek.
# 
# ChangeSet
#   2004/01/10 16:01:03-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] Merge of changes from Realtek:
#   - register voodoo in rtl8169_hw_start().
# 
# drivers/net/r8169.c
#   2004/01/10 11:00:58-05:00 romieu@fr.zoreil.com +6 -0
#   [netdrvr r8169] Merge of changes from Realtek:
#   - register voodoo in rtl8169_hw_start().
# 
# ChangeSet
#   2004/01/10 16:00:54-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] Merge of timer related changes from Realtek:
#   - changed their timeout value from 100 to HZ to trigger rtl8169_phy_timer();
#   - s/TX_TIMEOUT/RTL8169_TX_TIMEOUT/ to have RTL8169_{TX/PHY}_TIMEOUT.
# 
# drivers/net/r8169.c
#   2004/01/10 11:00:49-05:00 romieu@fr.zoreil.com +92 -2
#   [netdrvr r8169] Merge of timer related changes from Realtek:
#   - changed their timeout value from 100 to HZ to trigger rtl8169_phy_timer();
#   - s/TX_TIMEOUT/RTL8169_TX_TIMEOUT/ to have RTL8169_{TX/PHY}_TIMEOUT.
# 
# ChangeSet
#   2004/01/10 16:00:46-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] Merge of changes done by Realtek to rtl8169_init_one():
#   - phy capability settings allow a lower or equal capability, as suggested
#     in Realtek's changes;
#   - I/O voodoo;
#   - no need to s/mdio_write/RTL8169_WRITE_GMII_REG/;
#   - s/rtl8169_hw_PHY_config/rtl8169_hw_phy_config/;
#   - rtl8169_hw_phy_config(): ad-hoc struct "phy_magic" to limit duplication
#     of code (yep, the u16 -> int conversions should work as expected);
#   - variable renames and whitespace changes ignored.
# 
# drivers/net/r8169.c
#   2004/01/10 11:00:41-05:00 romieu@fr.zoreil.com +107 -8
#   [netdrvr r8169] Merge of changes done by Realtek to rtl8169_init_one():
#   - phy capability settings allow a lower or equal capability, as suggested
#     in Realtek's changes;
#   - I/O voodoo;
#   - no need to s/mdio_write/RTL8169_WRITE_GMII_REG/;
#   - s/rtl8169_hw_PHY_config/rtl8169_hw_phy_config/;
#   - rtl8169_hw_phy_config(): ad-hoc struct "phy_magic" to limit duplication
#     of code (yep, the u16 -> int conversions should work as expected);
#   - variable renames and whitespace changes ignored.
# 
# ChangeSet
#   2004/01/10 16:00:37-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] Add {mac/phy}_version.
#   - change of identification logic in rtl8169_init_board();
#   - {chip/rtl_chip}_info are merged in rtl_chip_info;
#   - misc style nits (lazy braces, SHOUTING MACROS from realtek converted to
#     functions).
# 
# drivers/net/r8169.c
#   2004/01/10 11:00:31-05:00 romieu@fr.zoreil.com +140 -37
#   [netdrvr r8169] Add {mac/phy}_version.
#   - change of identification logic in rtl8169_init_board();
#   - {chip/rtl_chip}_info are merged in rtl_chip_info;
#   - misc style nits (lazy braces, SHOUTING MACROS from realtek converted to
#     functions).
# 
# ChangeSet
#   2004/01/10 15:47:04-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] Rx copybreak for small packets.
#   - removal of rtl8169_unmap_rx() (unneeded for now).
# 
# drivers/net/r8169.c
#   2004/01/10 10:46:59-05:00 romieu@fr.zoreil.com +39 -8
#   [netdrvr r8169] Rx copybreak for small packets.
#   - removal of rtl8169_unmap_rx() (unneeded for now).
# 
# ChangeSet
#   2004/01/10 15:42:42-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] Conversion of Tx data buffers to PCI DMA:
#   - endianness is kept in a fscked state as it is in the original code
#     (will be addressed in a later patch);
#   - buf_addr of an unmapped descriptor is always set to the same value 
#     (cf rtl8169_unmap_tx_skb);
#   - nothing fancy, really.
# 
# drivers/net/r8169.c
#   2004/01/10 10:42:37-05:00 romieu@fr.zoreil.com +29 -7
#   [netdrvr r8169] Conversion of Tx data buffers to PCI DMA:
#   - endianness is kept in a fscked state as it is in the original code
#     (will be addressed in a later patch);
#   - buf_addr of an unmapped descriptor is always set to the same value 
#     (cf rtl8169_unmap_tx_skb);
#   - nothing fancy, really.
# 
# ChangeSet
#   2004/01/10 15:42:34-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] rtl8169_start_xmit fixes:
#   - it forgot to update stats if the skb couldn't be expanded;
#   - it didn't free it either if the descriptor was not available;
#   - move the spin_unlock nearer of the exit point instead of duplicating
#     it in the new branch.
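#
#   In other words, the drop path now has to both free the skb and account
#   for it, roughly (hypothetical sketch; the label name is illustrative):
#
#       err_drop:
#               dev_kfree_skb(skb);
#               tp->stats.tx_dropped++;
#               return 0;       /* the packet is gone, don't requeue it */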
# 
# drivers/net/r8169.c
#   2004/01/10 10:42:29-05:00 romieu@fr.zoreil.com +18 -13
#   [netdrvr r8169] rtl8169_start_xmit fixes:
#   - it forgot to update stats if the skb couldn't be expanded;
#   - it didn't free it either if the descriptor was not available;
#   - move the spin_unlock nearer of the exit point instead of duplicating
#     it in the new branch.
# 
# ChangeSet
#   2004/01/10 15:42:25-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] Conversion of Rx data buffers to PCI DMA
#   - endianness is kept in a fscked state as it is in the original code
#     (will be addressed in a later patch);
#   - rtl8169_rx_clear() walks the buffer ring and releases the allocated
#     data buffers. It needs to be used in two places: 
#     - rtl8169_init_ring() failure path;
#     - normal device release (i.e. rtl8169_close);
#   - rtl8169_free_rx_skb() releases a Rx data buffer. Mostly a helper
#     for rtl8169_rx_clear(). As such it must:
#     - unmap the memory area;
#     - release the skb;
#     - prevent the ring descriptor from being used again;
#   - rtl8169_alloc_rx_skb() prepares a Rx data buffer for use.
#     As such it must:
#     - allocate an skb;
#     - map the memory area;
#     - reflect the changes in the ring descriptor.
#     This function is balanced by rtl8169_free_rx_skb().
#   - rtl8169_unmap_rx() simply helps with the 80-column limit.
#   - rtl8169_rx_fill() walks a given range of the buffer ring and
#     tries to turn any descriptor into a ready-to-use one. It returns the
#     count of modified descriptors and exits if an allocation fails.
#     It can be seen as balanced by rtl8169_rx_clear(). Motivation:
#     - partially abstract the (usually big) piece of code for the refill
#       logic at the end of the Rx interrupt;
#     - factorize the refill logic and the initial ring setup.
#   - simple conversion of rtl8169_rx_interrupt() without rx_copybreak
#     (will be addressed in a later patch).
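#
#   Condensed sketch of the alloc/map/fill step described above (the
#   prototype, struct RxDesc and OWNbit are assumptions, not the exact hunk):
#
#       static int rtl8169_alloc_rx_skb(struct pci_dev *pdev,
#                                       struct sk_buff **sk_buff,
#                                       struct RxDesc *desc, int rx_buf_sz)
#       {
#               struct sk_buff *skb;
#               dma_addr_t mapping;
#
#               skb = dev_alloc_skb(rx_buf_sz);
#               if (!skb)
#                       return -ENOMEM;
#
#               mapping = pci_map_single(pdev, skb->tail, rx_buf_sz,
#                                        PCI_DMA_FROMDEVICE);
#               *sk_buff = skb;
#               desc->buf_addr = cpu_to_le32(mapping);
#               desc->status = cpu_to_le32(OWNbit | rx_buf_sz);
#               return 0;
#       }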
# 
# drivers/net/r8169.c
#   2004/01/10 10:42:20-05:00 romieu@fr.zoreil.com +161 -74
#   [netdrvr r8169] Conversion of Rx data buffers to PCI DMA
#   - endianness is kept in a fscked state as it is in the original code
#     (will be addressed in a later patch);
#   - rtl8169_rx_clear() walks the buffer ring and releases the allocated
#     data buffers. It needs to be used in two places: 
#     - rtl8169_init_ring() failure path;
#     - normal device release (i.e. rtl8169_close);
#   - rtl8169_free_rx_skb() releases a Rx data buffer. Mostly a helper
#     for rtl8169_rx_clear(). As such it must:
#     - unmap the memory area;
#     - release the skb;
#     - prevent the ring descriptor from being used again;
#   - rtl8169_alloc_rx_skb() prepares a Rx data buffer for use.
#     As such it must:
#     - allocate an skb;
#     - map the memory area;
#     - reflect the changes in the ring descriptor.
#     This function is balanced by rtl8169_free_rx_skb().
#   - rtl8169_unmap_rx() simply helps with the 80-column limit.
#   - rtl8169_rx_fill() walks a given range of the buffer ring and
#     tries to turn any descriptor into a ready-to-use one. It returns the
#     count of modified descriptors and exits if an allocation fails.
#     It can be seen as balanced by rtl8169_rx_clear(). Motivation:
#     - partially abstract the (usually big) piece of code for the refill
#       logic at the end of the Rx interrupt;
#     - factorize the refill logic and the initial ring setup.
#   - simple conversion of rtl8169_rx_interrupt() without rx_copybreak
#     (will be addressed in a later patch).
# 
# ChangeSet
#   2004/01/10 15:41:42-05:00 romieu@fr.zoreil.com 
#   [netdrvr r8169] Conversion of Rx/Tx descriptors to consistent DMA:
#   - use pci_alloc_consistent() for Rx/Tx descriptors in rtl8169_open()
#     (balanced by pci_free_consistent() on error path as well as in
#     rtl8169_close());
#   - removal of the fields {Rx/Tx}DescArrays in struct rtl8169_private
#     as there is no need to store a non-256-byte-aligned address any more;
#   - fix for rtl8169_open() leak when RxBufferRings allocation fails.
#     Said allocation is pushed to rtl8169_init_ring() as part of an evil
#     cunning plan.
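#
#   The open/close pairing then looks roughly like this (field names are
#   illustrative, not the exact hunk):
#
#       /* rtl8169_open() */
#       tp->TxDescArray = pci_alloc_consistent(pdev,
#                               NUM_TX_DESC * sizeof(struct TxDesc),
#                               &tp->TxPhyAddr);
#       if (!tp->TxDescArray)
#               goto err_out;
#
#       /* rtl8169_close() and the open() error path */
#       pci_free_consistent(pdev, NUM_TX_DESC * sizeof(struct TxDesc),
#                           tp->TxDescArray, tp->TxPhyAddr);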
# 
# drivers/net/r8169.c
#   2004/01/10 10:39:13-05:00 romieu@fr.zoreil.com +52 -47
#   [netdrvr r8169] Conversion of Rx/Tx descriptors to consistent DMA:
#   - use pci_alloc_consistent() for Rx/Tx descriptors in rtl8169_open()
#     (balanced by pci_free_consistent() on error path as well as in
#     rtl8169_close());
#   - removal of the fields {Rx/Tx}DescArrays in struct rtl8169_private
#     as there is no need to store a non-256-byte-aligned address any more;
#   - fix for rtl8169_open() leak when RxBufferRings allocation fails.
#     Said allocation is pushed to rtl8169_init_ring() as part of an evil
#     cunning plan.
# 
diff -Nru a/drivers/net/acenic.c b/drivers/net/acenic.c
--- a/drivers/net/acenic.c	Tue Mar 30 20:12:21 2004
+++ b/drivers/net/acenic.c	Tue Mar 30 20:12:21 2004
@@ -131,7 +131,6 @@
 #define PCI_DEVICE_ID_SGI_ACENIC	0x0009
 #endif
 
-#if LINUX_VERSION_CODE >= 0x20400
 static struct pci_device_id acenic_pci_tbl[] = {
 	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
 	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
@@ -156,37 +155,6 @@
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
-#endif
-
-
-#ifndef MODULE_LICENSE
-#define MODULE_LICENSE(a)
-#endif
-
-#ifndef wmb
-#define wmb()	mb()
-#endif
-
-#ifndef __exit
-#define __exit
-#endif
-
-#ifndef __devinit
-#define __devinit	__init
-#endif
-
-#ifndef SMP_CACHE_BYTES
-#define SMP_CACHE_BYTES	L1_CACHE_BYTES
-#endif
-
-#ifndef SET_MODULE_OWNER
-#define SET_MODULE_OWNER(dev)		do{} while(0)
-#define ACE_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
-#define ACE_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
-#else
-#define ACE_MOD_INC_USE_COUNT		do{} while(0)
-#define ACE_MOD_DEC_USE_COUNT		do{} while(0)
-#endif
 
 #ifndef SET_NETDEV_DEV
 #define SET_NETDEV_DEV(net, pdev)	do{} while(0)
@@ -198,151 +166,8 @@
 #define ace_sync_irq(irq)	synchronize_irq()
 #endif
 
-#if LINUX_VERSION_CODE < 0x2051e
-#define local_irq_save(flags)		do{__save_flags(flags) ; \
-					   __cli();} while(0)
-#define local_irq_restore(flags)	__restore_flags(flags)
-#endif
-
-#if (LINUX_VERSION_CODE < 0x02030d)
-#define pci_resource_start(dev, bar)	dev->base_address[bar]
-#elif (LINUX_VERSION_CODE < 0x02032c)
-#define pci_resource_start(dev, bar)	dev->resource[bar].start
-#endif
-
-#if (LINUX_VERSION_CODE < 0x02030e)
-#define net_device device
-#endif
-
-
-#if (LINUX_VERSION_CODE < 0x02032a)
-typedef u32 dma_addr_t;
-
-static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
-					 dma_addr_t *dma_handle)
-{
-	void *virt_ptr;
-
-	virt_ptr = kmalloc(size, GFP_KERNEL);
-	if (!virt_ptr)
-		return NULL;
-	*dma_handle = virt_to_bus(virt_ptr);
-	return virt_ptr;
-}
-
-#define pci_free_consistent(cookie, size, ptr, dma_ptr)	kfree(ptr)
-#define pci_map_page(cookie, page, off, size, dir)	\
-	virt_to_bus(page_address(page)+(off))
-#define pci_unmap_page(cookie, address, size, dir)
-#define pci_set_dma_mask(dev, mask)		\
-	(((u64)(mask) & 0xffffffff00000000) == 0 ? 0 : -EIO)
-#define pci_dma_supported(dev, mask)		\
-	(((u64)(mask) & 0xffffffff00000000) == 0 ? 1 : 0)
-
-#elif (LINUX_VERSION_CODE < 0x02040d)
-
-/*
- * 2.4.13 introduced pci_map_page()/pci_unmap_page() - for 2.4.12 and prior,
- * fall back on pci_map_single()/pci_unnmap_single().
- *
- * We are guaranteed that the page is mapped at this point since
- * pci_map_page() is only used upon valid struct skb's.
- */
-static inline dma_addr_t
-pci_map_page(struct pci_dev *cookie, struct page *page, unsigned long off,
-	     size_t size, int dir)
-{
-	void *page_virt;
-
-	page_virt = page_address(page);
-	if (!page_virt)
-		BUG();
-	return pci_map_single(cookie, (page_virt + off), size, dir);
-}
-#define pci_unmap_page(cookie, dma_addr, size, dir)	\
-	pci_unmap_single(cookie, dma_addr, size, dir)
-#endif
-
-#if (LINUX_VERSION_CODE < 0x020412)
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME)		0
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do{} while(0)
-#define pci_unmap_len(PTR, LEN_NAME)		0
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do{} while(0)
-#endif
-
-
-#if (LINUX_VERSION_CODE < 0x02032b)
-/*
- * SoftNet
- *
- * For pre-softnet kernels we need to tell the upper layer not to
- * re-enter start_xmit() while we are in there. However softnet
- * guarantees not to enter while we are in there so there is no need
- * to do the netif_stop_queue() dance unless the transmit queue really
- * gets stuck. This should also improve performance according to tests
- * done by Aman Singla.
- */
-#define dev_kfree_skb_irq(a)			dev_kfree_skb(a)
-#define netif_wake_queue(dev)			clear_bit(0, &dev->tbusy)
-#define netif_stop_queue(dev)			set_bit(0, &dev->tbusy)
-#define late_stop_netif_stop_queue(dev)		do{} while(0)
-#define early_stop_netif_stop_queue(dev)	test_and_set_bit(0,&dev->tbusy)
-#define early_stop_netif_wake_queue(dev)	netif_wake_queue(dev)
-
-static inline void netif_start_queue(struct net_device *dev)
-{
-	dev->tbusy = 0;
-	dev->interrupt = 0;
-	dev->start = 1;
-}
-
-#define ace_mark_net_bh()			mark_bh(NET_BH)
-#define netif_queue_stopped(dev)		dev->tbusy
-#define netif_running(dev)			dev->start
-#define ace_if_down(dev)			do{dev->start = 0;} while(0)
-
-#define tasklet_struct				tq_struct
-static inline void tasklet_schedule(struct tasklet_struct *tasklet)
-{
-	queue_task(tasklet, &tq_immediate);
-	mark_bh(IMMEDIATE_BH);
-}
-
-static inline void tasklet_init(struct tasklet_struct *tasklet,
-				void (*func)(unsigned long),
-				unsigned long data)
-{
-	tasklet->next = NULL;
-	tasklet->sync = 0;
-	tasklet->routine = (void (*)(void *))func;
-	tasklet->data = (void *)data;
-}
-#define tasklet_kill(tasklet)			do{} while(0)
-#else
-#define late_stop_netif_stop_queue(dev)		netif_stop_queue(dev)
-#define early_stop_netif_stop_queue(dev)	0
-#define early_stop_netif_wake_queue(dev)	do{} while(0)
-#define ace_mark_net_bh()			do{} while(0)
-#define ace_if_down(dev)			do{} while(0)
-#endif
-
-#if (LINUX_VERSION_CODE >= 0x02031b)
-#define NEW_NETINIT
-#define ACE_PROBE_ARG				void
-#else
-#define ACE_PROBE_ARG				struct net_device *dev
-#endif
-
-#ifndef min_t
-#define min_t(type,a,b)	(((a)<(b))?(a):(b))
-#endif
-
-#ifndef ARCH_HAS_PREFETCHW
-#ifndef prefetchw
-#define prefetchw(x)				do{} while(0)
-#endif
+#ifndef offset_in_page
+#define offset_in_page(ptr)	((unsigned long)(ptr) & ~PAGE_MASK)
 #endif
 
 #define ACE_MAX_MOD_PARMS	8
@@ -595,407 +420,323 @@
 static int tx_ratio[ACE_MAX_MOD_PARMS];
 static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
 
+MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
+MODULE_PARM(link, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(trace, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(tx_coal_tick, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(max_tx_desc, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(rx_coal_tick, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(max_rx_desc, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(tx_ratio, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
+MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
+MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives");
+MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait");
+MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first rx descriptor arrives");
+MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait");
+MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
+
+
 static char version[] __initdata = 
   "acenic.c: v0.92 08/05/2002  Jes Sorensen, linux-acenic@SunSITE.dk\n"
   "                            http://home.cern.ch/~jes/gige/acenic.html\n";
 
-static struct net_device *root_dev;
-
-static int probed __initdata = 0;
-
-
-int __devinit acenic_probe (ACE_PROBE_ARG)
+static int __devinit acenic_probe_one(struct pci_dev *pdev,
+		const struct pci_device_id *id)
 {
-#ifdef NEW_NETINIT
 	struct net_device *dev;
-#endif
 	struct ace_private *ap;
-	struct pci_dev *pdev = NULL;
-	int boards_found = 0;
-	int version_disp;
-
-	if (probed)
-		return -ENODEV;
-	probed++;
-
-	version_disp = 0;
-
-	while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET<<8, pdev))) {
-
-		if (!((pdev->vendor == PCI_VENDOR_ID_ALTEON) &&
-		      ((pdev->device == PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE) ||
-		       (pdev->device == PCI_DEVICE_ID_ALTEON_ACENIC_COPPER)))&&
-		    !((pdev->vendor == PCI_VENDOR_ID_3COM) &&
-		      (pdev->device == PCI_DEVICE_ID_3COM_3C985)) &&
-		    !((pdev->vendor == PCI_VENDOR_ID_NETGEAR) &&
-		      ((pdev->device == PCI_DEVICE_ID_NETGEAR_GA620) || 
-		       (pdev->device == PCI_DEVICE_ID_NETGEAR_GA620T))) &&
-		/*
-		 * Farallon used the DEC vendor ID on their cards by
-		 * mistake for a while
-		 */
-		    !((pdev->vendor == PCI_VENDOR_ID_DEC) &&
-		      (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX)) &&
-		    !((pdev->vendor == PCI_VENDOR_ID_ALTEON) &&
-		      (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T)) &&
-		    !((pdev->vendor == PCI_VENDOR_ID_SGI) &&
-		      (pdev->device == PCI_DEVICE_ID_SGI_ACENIC)))
-			continue;
-
-		dev = alloc_etherdev(sizeof(struct ace_private));
-		if (dev == NULL) {
-			printk(KERN_ERR "acenic: Unable to allocate "
-			       "net_device structure!\n");
-			break;
-		}
+	static int boards_found;
 
-		SET_MODULE_OWNER(dev);
-		SET_NETDEV_DEV(dev, &pdev->dev);
+	dev = alloc_etherdev(sizeof(struct ace_private));
+	if (dev == NULL) {
+		printk(KERN_ERR "acenic: Unable to allocate "
+		       "net_device structure!\n");
+		return -ENOMEM;
+	}
+
+	SET_MODULE_OWNER(dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
 
-		ap = dev->priv;
-		ap->pdev = pdev;
+	ap = dev->priv;
+	ap->pdev = pdev;
 
-		dev->open = &ace_open;
-		dev->hard_start_xmit = &ace_start_xmit;
-		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
 #if ACENIC_DO_VLAN
-		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-		dev->vlan_rx_register = ace_vlan_rx_register;
-		dev->vlan_rx_kill_vid = ace_vlan_rx_kill_vid;
-#endif
-		if (1) {
-			static void ace_watchdog(struct net_device *dev);
-			dev->tx_timeout = &ace_watchdog;
-			dev->watchdog_timeo = 5*HZ;
-		}
-		dev->stop = &ace_close;
-		dev->get_stats = &ace_get_stats;
-		dev->set_multicast_list = &ace_set_multicast_list;
-		dev->do_ioctl = &ace_ioctl;
-		dev->set_mac_address = &ace_set_mac_addr;
-		dev->change_mtu = &ace_change_mtu;
+	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+	dev->vlan_rx_register = ace_vlan_rx_register;
+	dev->vlan_rx_kill_vid = ace_vlan_rx_kill_vid;
+#endif
+	if (1) {
+		static void ace_watchdog(struct net_device *dev);
+		dev->tx_timeout = &ace_watchdog;
+		dev->watchdog_timeo = 5*HZ;
+	}
 
-		/* display version info if adapter is found */
-		if (!version_disp)
-		{
-			/* set display flag to TRUE so that */
-			/* we only display this string ONCE */
-			version_disp = 1;
-			printk(version);
-		}
+	dev->open = &ace_open;
+	dev->stop = &ace_close;
+	dev->hard_start_xmit = &ace_start_xmit;
+	dev->get_stats = &ace_get_stats;
+	dev->set_multicast_list = &ace_set_multicast_list;
+	dev->do_ioctl = &ace_ioctl;
+	dev->set_mac_address = &ace_set_mac_addr;
+	dev->change_mtu = &ace_change_mtu;
 
-		if (pci_enable_device(pdev)) {
-			free_netdev(dev);
-			continue;
-		}
+	/* we only display this string ONCE */
+	if (!boards_found)
+		printk(version);
 
-		/*
-		 * Enable master mode before we start playing with the
-		 * pci_command word since pci_set_master() will modify
-		 * it.
-		 */
-		pci_set_master(pdev);
+	if (pci_enable_device(pdev))
+		goto fail_free_netdev;
 
-		pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);
+	/*
+	 * Enable master mode before we start playing with the
+	 * pci_command word since pci_set_master() will modify
+	 * it.
+	 */
+	pci_set_master(pdev);
 
-		/* OpenFirmware on Mac's does not set this - DOH.. */ 
-		if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
-			printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
-			       "access - was not enabled by BIOS/Firmware\n",
-			       dev->name);
-			ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
-			pci_write_config_word(ap->pdev, PCI_COMMAND,
-					      ap->pci_command);
-			wmb();
-		}
+	pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);
 
-		pci_read_config_byte(pdev, PCI_LATENCY_TIMER,
-				     &ap->pci_latency);
-		if (ap->pci_latency <= 0x40) {
-			ap->pci_latency = 0x40;
-			pci_write_config_byte(pdev, PCI_LATENCY_TIMER,
-					      ap->pci_latency);
-		}
+	/* OpenFirmware on Mac's does not set this - DOH.. */ 
+	if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
+		printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
+		       "access - was not enabled by BIOS/Firmware\n",
+		       dev->name);
+		ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
+		pci_write_config_word(ap->pdev, PCI_COMMAND,
+				      ap->pci_command);
+		wmb();
+	}
 
-		/*
-		 * Remap the regs into kernel space - this is abuse of
-		 * dev->base_addr since it was means for I/O port
-		 * addresses but who gives a damn.
-		 */
-		dev->base_addr = pci_resource_start(pdev, 0);
-		ap->regs = (struct ace_regs *)ioremap(dev->base_addr, 0x4000);
-		if (!ap->regs) {
-			printk(KERN_ERR "%s:  Unable to map I/O register, "
-			       "AceNIC %i will be disabled.\n",
-			       dev->name, boards_found);
-			break;
-		}
+	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
+	if (ap->pci_latency <= 0x40) {
+		ap->pci_latency = 0x40;
+		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
+	}
 
-		switch(pdev->vendor) {
-		case PCI_VENDOR_ID_ALTEON:
-			if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
-				strncpy(ap->name, "Farallon PN9100-T "
-					"Gigabit Ethernet", sizeof (ap->name));
-				printk(KERN_INFO "%s: Farallon PN9100-T ",
-				       dev->name);
-			} else {
-				strncpy(ap->name, "AceNIC Gigabit Ethernet",
-					sizeof (ap->name));
-				printk(KERN_INFO "%s: Alteon AceNIC ",
-				       dev->name);
-			}
-			break;
-		case PCI_VENDOR_ID_3COM:
-			strncpy(ap->name, "3Com 3C985 Gigabit Ethernet",
-				sizeof (ap->name));
-			printk(KERN_INFO "%s: 3Com 3C985 ", dev->name);
-			break;
-		case PCI_VENDOR_ID_NETGEAR:
-			strncpy(ap->name, "NetGear GA620 Gigabit Ethernet",
-				sizeof (ap->name));
-			printk(KERN_INFO "%s: NetGear GA620 ", dev->name);
-			break;
-		case PCI_VENDOR_ID_DEC:
-			if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
-				strncpy(ap->name, "Farallon PN9000-SX "
-					"Gigabit Ethernet", sizeof (ap->name));
-				printk(KERN_INFO "%s: Farallon PN9000-SX ",
-				       dev->name);
-				break;
-			}
-		case PCI_VENDOR_ID_SGI:
-			strncpy(ap->name, "SGI AceNIC Gigabit Ethernet",
+	/*
+	 * Remap the regs into kernel space - this is abuse of
+	 * dev->base_addr since it was means for I/O port
+	 * addresses but who gives a damn.
+	 */
+	dev->base_addr = pci_resource_start(pdev, 0);
+	ap->regs = (struct ace_regs *)ioremap(dev->base_addr, 0x4000);
+	if (!ap->regs) {
+		printk(KERN_ERR "%s:  Unable to map I/O register, "
+		       "AceNIC %i will be disabled.\n",
+		       dev->name, boards_found);
+		goto fail_free_netdev;
+	}
+
+	switch(pdev->vendor) {
+	case PCI_VENDOR_ID_ALTEON:
+		if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
+			strncpy(ap->name, "Farallon PN9100-T "
+				"Gigabit Ethernet", sizeof (ap->name));
+			printk(KERN_INFO "%s: Farallon PN9100-T ",
+			       dev->name);
+		} else {
+			strncpy(ap->name, "AceNIC Gigabit Ethernet",
 				sizeof (ap->name));
-			printk(KERN_INFO "%s: SGI AceNIC ", dev->name);
-			break;
-		default:
- 			strncpy(ap->name, "Unknown AceNIC based Gigabit "
-				"Ethernet", sizeof (ap->name));
-			printk(KERN_INFO "%s: Unknown AceNIC ", dev->name);
+			printk(KERN_INFO "%s: Alteon AceNIC ",
+			       dev->name);
+		}
+		break;
+	case PCI_VENDOR_ID_3COM:
+		strncpy(ap->name, "3Com 3C985 Gigabit Ethernet",
+			sizeof (ap->name));
+		printk(KERN_INFO "%s: 3Com 3C985 ", dev->name);
+		break;
+	case PCI_VENDOR_ID_NETGEAR:
+		strncpy(ap->name, "NetGear GA620 Gigabit Ethernet",
+			sizeof (ap->name));
+		printk(KERN_INFO "%s: NetGear GA620 ", dev->name);
+		break;
+	case PCI_VENDOR_ID_DEC:
+		if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
+			strncpy(ap->name, "Farallon PN9000-SX "
+				"Gigabit Ethernet", sizeof (ap->name));
+			printk(KERN_INFO "%s: Farallon PN9000-SX ",
+			       dev->name);
 			break;
 		}
-		ap->name [sizeof (ap->name) - 1] = '\0';
-		printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
+	case PCI_VENDOR_ID_SGI:
+		strncpy(ap->name, "SGI AceNIC Gigabit Ethernet",
+			sizeof (ap->name));
+		printk(KERN_INFO "%s: SGI AceNIC ", dev->name);
+		break;
+	default:
+ 		strncpy(ap->name, "Unknown AceNIC based Gigabit "
+			"Ethernet", sizeof (ap->name));
+		printk(KERN_INFO "%s: Unknown AceNIC ", dev->name);
+		break;
+	}
+
+	ap->name [sizeof (ap->name) - 1] = '\0';
+	printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
 #ifdef __sparc__
-		printk("irq %s\n", __irq_itoa(pdev->irq));
+	printk("irq %s\n", __irq_itoa(pdev->irq));
 #else
-		printk("irq %i\n", pdev->irq);
+	printk("irq %i\n", pdev->irq);
 #endif
 
 #ifdef CONFIG_ACENIC_OMIT_TIGON_I
-		if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
-			printk(KERN_ERR "%s: Driver compiled without Tigon I"
-			       " support - NIC disabled\n", dev->name);
-			ace_init_cleanup(dev);
-			free_netdev(dev);
-			continue;
-		}
+	if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
+		printk(KERN_ERR "%s: Driver compiled without Tigon I"
+		       " support - NIC disabled\n", dev->name);
+		goto fail_uninit;
+	}
 #endif
 
-		if (ace_allocate_descriptors(dev)) {
-			/*
-			 * ace_allocate_descriptors() calls
-			 * ace_init_cleanup() on error.
-			 */
-			free_netdev(dev);
-			continue;
-		}
+	if (ace_allocate_descriptors(dev))
+		goto fail_free_netdev;
 
 #ifdef MODULE
-		if (boards_found >= ACE_MAX_MOD_PARMS)
-			ap->board_idx = BOARD_IDX_OVERFLOW;
-		else
-			ap->board_idx = boards_found;
+	if (boards_found >= ACE_MAX_MOD_PARMS)
+		ap->board_idx = BOARD_IDX_OVERFLOW;
+	else
+		ap->board_idx = boards_found;
 #else
-		ap->board_idx = BOARD_IDX_STATIC;
+	ap->board_idx = BOARD_IDX_STATIC;
 #endif
 
-		if (ace_init(dev)) {
-			/*
-			 * ace_init() calls ace_init_cleanup() on error.
-			 */
-			free_netdev(dev);
-			continue;
-		}
+	if (ace_init(dev))
+		goto fail_free_netdev;
 
-		if (register_netdev(dev)) {
-			printk(KERN_ERR "acenic: device registration failed\n");
-			ace_init_cleanup(dev);
-			free_netdev(dev);
-			continue;
-		}
-
-		if (ap->pci_using_dac)
-			dev->features |= NETIF_F_HIGHDMA;
-
-		boards_found++;
+	if (register_netdev(dev)) {
+		printk(KERN_ERR "acenic: device registration failed\n");
+		goto fail_uninit;
 	}
 
-	/*
-	 * If we're at this point we're going through ace_probe() for
-	 * the first time.  Return success (0) if we've initialized 1
-	 * or more boards. Otherwise, return failure (-ENODEV).
-	 */
-
-	if (boards_found > 0)
-		return 0;
-	else
-		return -ENODEV;
-}
+	if (ap->pci_using_dac)
+		dev->features |= NETIF_F_HIGHDMA;
 
+	pci_set_drvdata(pdev, dev);
 
-#ifdef MODULE
-MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
-MODULE_PARM(link, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(trace, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(tx_coal_tick, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(max_tx_desc, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(rx_coal_tick, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(max_rx_desc, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(tx_ratio, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
-MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
-MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives");
-MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait");
-MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first rx descriptor arrives");
-MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait");
-MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
-#endif
+	boards_found++;
+	return 0;
 
+ fail_uninit:
+	ace_init_cleanup(dev);
+ fail_free_netdev:
+	free_netdev(dev);
+	return -ENODEV;
+}
 
-static void __exit ace_module_cleanup(void)
+static void __devexit acenic_remove_one(struct pci_dev *pdev)
 {
-	struct ace_private *ap;
-	struct ace_regs *regs;
-	struct net_device *next;
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct ace_private *ap = dev->priv;
+	struct ace_regs *regs = ap->regs;
 	short i;
 
-	while (root_dev) {
-		ap = root_dev->priv;
-		next = ap->next;
-		unregister_netdev(root_dev);
-
-		regs = ap->regs;
+	unregister_netdev(dev);
 
-		writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
-		if (ap->version >= 2)
-			writel(readl(&regs->CpuBCtrl) | CPU_HALT,
-			       &regs->CpuBCtrl);
-		/*
-		 * This clears any pending interrupts
-		 */
-		writel(1, &regs->Mb0Lo);
-		readl(&regs->CpuCtrl);	/* flush */
+	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
+	if (ap->version >= 2)
+		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
+	
+	/*
+	 * This clears any pending interrupts
+	 */
+	writel(1, &regs->Mb0Lo);
+	readl(&regs->CpuCtrl);	/* flush */
 
-		/*
-		 * Make sure no other CPUs are processing interrupts
-		 * on the card before the buffers are being released.
-		 * Otherwise one might experience some `interesting'
-		 * effects.
-		 *
-		 * Then release the RX buffers - jumbo buffers were
-		 * already released in ace_close().
-		 */
-		ace_sync_irq(root_dev->irq);
+	/*
+	 * Make sure no other CPUs are processing interrupts
+	 * on the card before the buffers are being released.
+	 * Otherwise one might experience some `interesting'
+	 * effects.
+	 *
+	 * Then release the RX buffers - jumbo buffers were
+	 * already released in ace_close().
+	 */
+	ace_sync_irq(dev->irq);
 
-		for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
-			struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
+	for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
+		struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
 
-			if (skb) {
-				struct ring_info *ringp;
-				dma_addr_t mapping;
+		if (skb) {
+			struct ring_info *ringp;
+			dma_addr_t mapping;
 
-				ringp = &ap->skb->rx_std_skbuff[i];
-				mapping = pci_unmap_addr(ringp, mapping);
-				pci_unmap_page(ap->pdev, mapping,
-					       ACE_STD_BUFSIZE - (2 + 16),
-					       PCI_DMA_FROMDEVICE);
+			ringp = &ap->skb->rx_std_skbuff[i];
+			mapping = pci_unmap_addr(ringp, mapping);
+			pci_unmap_page(ap->pdev, mapping,
+				       ACE_STD_BUFSIZE - (2 + 16),
+				       PCI_DMA_FROMDEVICE);
 
-				ap->rx_std_ring[i].size = 0;
-				ap->skb->rx_std_skbuff[i].skb = NULL;
-				dev_kfree_skb(skb);
-			}
-		}
-		if (ap->version >= 2) {
-			for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
-				struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
-
-				if (skb) {
-					struct ring_info *ringp;
-					dma_addr_t mapping;
-
-					ringp = &ap->skb->rx_mini_skbuff[i];
-					mapping = pci_unmap_addr(ringp,mapping);
-					pci_unmap_page(ap->pdev, mapping,
-						       ACE_MINI_BUFSIZE - (2 + 16),
-						       PCI_DMA_FROMDEVICE);
-
-					ap->rx_mini_ring[i].size = 0;
-					ap->skb->rx_mini_skbuff[i].skb = NULL;
-					dev_kfree_skb(skb);
-				}
-			}
+			ap->rx_std_ring[i].size = 0;
+			ap->skb->rx_std_skbuff[i].skb = NULL;
+			dev_kfree_skb(skb);
 		}
-		for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
-			struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
+	}
+
+	if (ap->version >= 2) {
+		for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
+			struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
+
 			if (skb) {
 				struct ring_info *ringp;
 				dma_addr_t mapping;
 
-				ringp = &ap->skb->rx_jumbo_skbuff[i];
-				mapping = pci_unmap_addr(ringp, mapping);
+				ringp = &ap->skb->rx_mini_skbuff[i];
+				mapping = pci_unmap_addr(ringp,mapping);
 				pci_unmap_page(ap->pdev, mapping,
-					       ACE_JUMBO_BUFSIZE - (2 + 16),
+					       ACE_MINI_BUFSIZE - (2 + 16),
 					       PCI_DMA_FROMDEVICE);
 
-				ap->rx_jumbo_ring[i].size = 0;
-				ap->skb->rx_jumbo_skbuff[i].skb = NULL;
+				ap->rx_mini_ring[i].size = 0;
+				ap->skb->rx_mini_skbuff[i].skb = NULL;
 				dev_kfree_skb(skb);
 			}
 		}
-
-		ace_init_cleanup(root_dev);
-		free_netdev(root_dev);
-		root_dev = next;
 	}
-}
 
+	for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
+		struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
+		if (skb) {
+			struct ring_info *ringp;
+			dma_addr_t mapping;
 
-int __init ace_module_init(void)
-{
-	int status;
+			ringp = &ap->skb->rx_jumbo_skbuff[i];
+			mapping = pci_unmap_addr(ringp, mapping);
+			pci_unmap_page(ap->pdev, mapping,
+				       ACE_JUMBO_BUFSIZE - (2 + 16),
+				       PCI_DMA_FROMDEVICE);
 
-	root_dev = NULL;
+			ap->rx_jumbo_ring[i].size = 0;
+			ap->skb->rx_jumbo_skbuff[i].skb = NULL;
+			dev_kfree_skb(skb);
+		}
+	}
 
-#ifdef NEW_NETINIT
-	status = acenic_probe();
-#else
-	status = acenic_probe(NULL);
-#endif
-	return status;
+	ace_init_cleanup(dev);
+	free_netdev(dev);
 }
 
+static struct pci_driver acenic_pci_driver = {
+	.name		= "acenic",
+	.id_table	= acenic_pci_tbl,
+	.probe		= acenic_probe_one,
+	.remove		= __devexit_p(acenic_remove_one),
+};
 
-#if (LINUX_VERSION_CODE < 0x02032a)
-#ifdef MODULE
-int init_module(void)
+static int __init acenic_init(void)
 {
-	return ace_module_init();
+	return pci_module_init(&acenic_pci_driver);
 }
 
-
-void cleanup_module(void)
+static void __exit acenic_exit(void)
 {
-	ace_module_cleanup();
+	pci_unregister_driver(&acenic_pci_driver);
 }
-#endif
-#else
-module_init(ace_module_init);
-module_exit(ace_module_cleanup);
-#endif
 
+module_init(acenic_init);
+module_exit(acenic_exit);
 
 static void ace_free_descriptors(struct net_device *dev)
 {
@@ -1462,13 +1203,6 @@
 	} else
 		dev->irq = pdev->irq;
 
-	/*
-	 * Register the device here to be able to catch allocated
-	 * interrupt handlers in case the firmware doesn't come up.
-	 */
-	ap->next = root_dev;
-	root_dev = dev;
-
 #ifdef INDEX_DEBUG
 	spin_lock_init(&ap->debug_lock);
 	ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
@@ -2642,8 +2376,6 @@
 
 	netif_start_queue(dev);
 
-	ACE_MOD_INC_USE_COUNT;
-
 	/*
 	 * Setup the bottom half rx ring refill handler
 	 */
@@ -2660,8 +2392,6 @@
 	unsigned long flags;
 	short i;
 
-	ace_if_down(dev);
-
 	/*
 	 * Without (or before) releasing irq and stopping hardware, this
 	 * is an absolute non-sense, by the way. It will be reset instantly
@@ -2733,7 +2463,6 @@
 	ace_unmask_irq(dev);
 	local_irq_restore(flags);
 
-	ACE_MOD_DEC_USE_COUNT;
 	return 0;
 }
 
@@ -2789,12 +2518,6 @@
 	struct ace_regs *regs = ap->regs;
 	struct tx_desc *desc;
 	u32 idx, flagsize;
-
- 	/*
-	 * This only happens with pre-softnet, ie. 2.2.x kernels.
- 	 */
-	if (early_stop_netif_stop_queue(dev))
- 		return 1;
 
 restart:
 	idx = ap->tx_prd;
diff -Nru a/drivers/net/epic100.c b/drivers/net/epic100.c
--- a/drivers/net/epic100.c	Tue Mar 30 20:12:21 2004
+++ b/drivers/net/epic100.c	Tue Mar 30 20:12:21 2004
@@ -96,9 +96,9 @@
    Making the Tx ring too large decreases the effectiveness of channel
    bonding and packet priority.
    There are no ill effects from too-large receive rings. */
-#define TX_RING_SIZE	16
-#define TX_QUEUE_LEN	10		/* Limit ring entries actually used.  */
-#define RX_RING_SIZE	32
+#define TX_RING_SIZE	256
+#define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
+#define RX_RING_SIZE	256
 #define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct epic_tx_desc)
 #define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct epic_rx_desc)
 
@@ -292,6 +292,12 @@
 	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
 };
 
+#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */
+
+#define EpicNapiEvent	(TxEmpty | TxDone | \
+			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
+#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)
+
 static u16 media2miictl[16] = {
 	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
 	0, 0, 0, 0,  0, 0, 0, 0 };
@@ -330,9 +336,12 @@
 
 	/* Ring pointers. */
 	spinlock_t lock;				/* Group with Tx control cache line. */
+	spinlock_t napi_lock;
+	unsigned int reschedule_in_poll;
 	unsigned int cur_tx, dirty_tx;
 
 	unsigned int cur_rx, dirty_rx;
+	u32 irq_mask;
 	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
 
 	struct pci_dev *pci_dev;			/* PCI bus location. */
@@ -359,7 +368,8 @@
 static void epic_tx_timeout(struct net_device *dev);
 static void epic_init_ring(struct net_device *dev);
 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static int epic_rx(struct net_device *dev);
+static int epic_rx(struct net_device *dev, int budget);
+static int epic_poll(struct net_device *dev, int *budget);
 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static struct ethtool_ops netdev_ethtool_ops;
@@ -378,7 +388,7 @@
 	int irq;
 	struct net_device *dev;
 	struct epic_private *ep;
-	int i, option = 0, duplex = 0;
+	int i, ret, option = 0, duplex = 0;
 	void *ring_space;
 	dma_addr_t ring_dma;
 
@@ -392,29 +402,33 @@
 	
 	card_idx++;
 	
-	i = pci_enable_device(pdev);
-	if (i)
-		return i;
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto out;
 	irq = pdev->irq;
 
 	if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
 		printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_out_disable;
 	}
 	
 	pci_set_master(pdev);
 
+	ret = pci_request_regions(pdev, DRV_NAME);
+	if (ret < 0)
+		goto err_out_disable;
+
+	ret = -ENOMEM;
+
 	dev = alloc_etherdev(sizeof (*ep));
 	if (!dev) {
 		printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
-		return -ENOMEM;
+		goto err_out_free_res;
 	}
 	SET_MODULE_OWNER(dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	if (pci_request_regions(pdev, DRV_NAME))
-		goto err_out_free_netdev;
-
 #ifdef USE_IO_OPS
 	ioaddr = pci_resource_start (pdev, 0);
 #else
@@ -422,7 +436,7 @@
 	ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
 	if (!ioaddr) {
 		printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
-		goto err_out_free_res;
+		goto err_out_free_netdev;
 	}
 #endif
 
@@ -459,7 +473,9 @@
 	dev->base_addr = ioaddr;
 	dev->irq = irq;
 
-	spin_lock_init (&ep->lock);
+	spin_lock_init(&ep->lock);
+	spin_lock_init(&ep->napi_lock);
+	ep->reschedule_in_poll = 0;
 
 	/* Bring the chip out of low-power mode. */
 	outl(0x4200, ioaddr + GENCTL);
@@ -489,6 +505,9 @@
 	ep->pci_dev = pdev;
 	ep->chip_id = chip_idx;
 	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
+	ep->irq_mask = 
+		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
+		 | CntFull | TxUnderrun | EpicNapiEvent;
 
 	/* Find the connected MII xcvrs.
 	   Doing this in open() would allow detecting external xcvrs later, but
@@ -543,10 +562,12 @@
 	dev->ethtool_ops = &netdev_ethtool_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 	dev->tx_timeout = &epic_tx_timeout;
+	dev->poll = epic_poll;
+	dev->weight = 64;
 
-	i = register_netdev(dev);
-	if (i)
-		goto err_out_unmap_tx;
+	ret = register_netdev(dev);
+	if (ret < 0)
+		goto err_out_unmap_rx;
 
 	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
 		   dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
@@ -554,19 +575,24 @@
 		printk("%2.2x:", dev->dev_addr[i]);
 	printk("%2.2x.\n", dev->dev_addr[i]);
 
-	return 0;
+out:
+	return ret;
 
+err_out_unmap_rx:
+	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
 err_out_unmap_tx:
 	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
 err_out_iounmap:
 #ifndef USE_IO_OPS
 	iounmap(ioaddr);
-err_out_free_res:
-#endif
-	pci_release_regions(pdev);
 err_out_free_netdev:
+#endif
 	free_netdev(dev);
-	return -ENODEV;
+err_out_free_res:
+	pci_release_regions(pdev);
+err_out_disable:
+	pci_disable_device(pdev);
+	goto out;
 }
 
 /* Serial EEPROM section. */
@@ -592,6 +618,36 @@
 #define EE_READ256_CMD	(6 << 8)
 #define EE_ERASE_CMD	(7 << 6)
 
+static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
+{
+	long ioaddr = dev->base_addr;
+
+	outl(0x00000000, ioaddr + INTMASK);
+}
+
+static inline void __epic_pci_commit(long ioaddr)
+{
+#ifndef USE_IO_OPS
+	inl(ioaddr + INTMASK);
+#endif
+}
+
+static void epic_napi_irq_off(struct net_device *dev, struct epic_private *ep)
+{
+	long ioaddr = dev->base_addr;
+
+	outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
+	__epic_pci_commit(ioaddr);
+}
+
+static void epic_napi_irq_on(struct net_device *dev, struct epic_private *ep)
+{
+	long ioaddr = dev->base_addr;
+
+	/* No need to commit possible posted write */
+	outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
+}
+
 static int __devinit read_eeprom(long ioaddr, int location)
 {
 	int i;
@@ -752,9 +808,8 @@
 
 	/* Enable interrupts by setting the interrupt mask. */
 	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
-		 | CntFull | TxUnderrun | TxDone | TxEmpty
-		 | RxError | RxOverflow | RxFull | RxHeader | RxDone,
-		 ioaddr + INTMASK);
+		 | CntFull | TxUnderrun 
+		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
 
 	if (debug > 1)
 		printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
@@ -795,7 +850,7 @@
 	}
 
 	/* Remove the packets on the Rx queue. */
-	epic_rx(dev);
+	epic_rx(dev, RX_RING_SIZE);
 }
 
 static void epic_restart(struct net_device *dev)
@@ -841,9 +896,9 @@
 
 	/* Enable interrupts by setting the interrupt mask. */
 	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
-		 | CntFull | TxUnderrun | TxDone | TxEmpty
-		 | RxError | RxOverflow | RxFull | RxHeader | RxDone,
-		 ioaddr + INTMASK);
+		 | CntFull | TxUnderrun
+		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
+
 	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
 		   " interrupt %4.4x.\n",
 		   dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
@@ -929,7 +984,6 @@
 	int i;
 
 	ep->tx_full = 0;
-	ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
 	ep->dirty_tx = ep->cur_tx = 0;
 	ep->cur_rx = ep->dirty_rx = 0;
 	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
@@ -1029,6 +1083,76 @@
 	return 0;
 }
 
+static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
+			  int status)
+{
+	struct net_device_stats *stats = &ep->stats;
+
+#ifndef final_version
+	/* There was a major error, log it. */
+	if (debug > 1)
+		printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+		       dev->name, status);
+#endif
+	stats->tx_errors++;
+	if (status & 0x1050)
+		stats->tx_aborted_errors++;
+	if (status & 0x0008)
+		stats->tx_carrier_errors++;
+	if (status & 0x0040)
+		stats->tx_window_errors++;
+	if (status & 0x0010)
+		stats->tx_fifo_errors++;
+}
+
+static void epic_tx(struct net_device *dev, struct epic_private *ep)
+{
+	unsigned int dirty_tx, cur_tx;
+
+	/*
+	 * Note: if this lock becomes a problem we can narrow the locked
+	 * region at the cost of occasionally grabbing the lock more times.
+	 */
+	cur_tx = ep->cur_tx;
+	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
+		struct sk_buff *skb;
+		int entry = dirty_tx % TX_RING_SIZE;
+		int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
+
+		if (txstatus & DescOwn)
+			break;	/* It still hasn't been Txed */
+
+		if (likely(txstatus & 0x0001)) {
+			ep->stats.collisions += (txstatus >> 8) & 15;
+			ep->stats.tx_packets++;
+			ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
+		} else
+			epic_tx_error(dev, ep, txstatus);
+
+		/* Free the original skb. */
+		skb = ep->tx_skbuff[entry];
+		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, 
+				 skb->len, PCI_DMA_TODEVICE);
+		dev_kfree_skb_irq(skb);
+		ep->tx_skbuff[entry] = 0;
+	}
+
+#ifndef final_version
+	if (cur_tx - dirty_tx > TX_RING_SIZE) {
+		printk(KERN_WARNING
+		       "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+		       dev->name, dirty_tx, cur_tx, ep->tx_full);
+		dirty_tx += TX_RING_SIZE;
+	}
+#endif
+	ep->dirty_tx = dirty_tx;
+	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
+		/* The ring is no longer full, allow new TX entries. */
+		ep->tx_full = 0;
+		netif_wake_queue(dev);
+	}
+}
+
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
@@ -1042,7 +1166,7 @@
 	do {
 		status = inl(ioaddr + INTSTAT);
 		/* Acknowledge all of the current interrupt sources ASAP. */
-		outl(status & 0x00007fff, ioaddr + INTSTAT);
+		outl(status & EpicNormalEvent, ioaddr + INTSTAT);
 
 		if (debug > 4)
 			printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
@@ -1053,74 +1177,21 @@
 			break;
 		handled = 1;
 
-		if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow))
-			epic_rx(dev);
-
-		if (status & (TxEmpty | TxDone)) {
-			unsigned int dirty_tx, cur_tx;
-
-			/* Note: if this lock becomes a problem we can narrow the locked
-			   region at the cost of occasionally grabbing the lock more
-			   times. */
-			spin_lock(&ep->lock);
-			cur_tx = ep->cur_tx;
-			dirty_tx = ep->dirty_tx;
-			for (; cur_tx - dirty_tx > 0; dirty_tx++) {
-				struct sk_buff *skb;
-				int entry = dirty_tx % TX_RING_SIZE;
-				int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
-
-				if (txstatus & DescOwn)
-					break;			/* It still hasn't been Txed */
-
-				if ( ! (txstatus & 0x0001)) {
-					/* There was an major error, log it. */
-#ifndef final_version
-					if (debug > 1)
-						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
-							   dev->name, txstatus);
-#endif
-					ep->stats.tx_errors++;
-					if (txstatus & 0x1050) ep->stats.tx_aborted_errors++;
-					if (txstatus & 0x0008) ep->stats.tx_carrier_errors++;
-					if (txstatus & 0x0040) ep->stats.tx_window_errors++;
-					if (txstatus & 0x0010) ep->stats.tx_fifo_errors++;
-				} else {
-					ep->stats.collisions += (txstatus >> 8) & 15;
-					ep->stats.tx_packets++;
-					ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
-				}
-
-				/* Free the original skb. */
-				skb = ep->tx_skbuff[entry];
-				pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, 
-						 skb->len, PCI_DMA_TODEVICE);
-				dev_kfree_skb_irq(skb);
-				ep->tx_skbuff[entry] = 0;
-			}
-
-#ifndef final_version
-			if (cur_tx - dirty_tx > TX_RING_SIZE) {
-				printk(KERN_WARNING "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
-					   dev->name, dirty_tx, cur_tx, ep->tx_full);
-				dirty_tx += TX_RING_SIZE;
-			}
-#endif
-			ep->dirty_tx = dirty_tx;
-			if (ep->tx_full
-				&& cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
-				/* The ring is no longer full, allow new TX entries. */
-				ep->tx_full = 0;
-				spin_unlock(&ep->lock);
-				netif_wake_queue(dev);
+		if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
+			spin_lock(&ep->napi_lock);
+			if (netif_rx_schedule_prep(dev)) {
+				epic_napi_irq_off(dev, ep);
+				__netif_rx_schedule(dev);
 			} else
-				spin_unlock(&ep->lock);
+				ep->reschedule_in_poll++;
+			spin_unlock(&ep->napi_lock);
 		}
+		status &= ~EpicNapiEvent;
 
 		/* Check uncommon events all at once. */
-		if (status & (CntFull | TxUnderrun | RxOverflow | RxFull |
-					  PCIBusErr170 | PCIBusErr175)) {
-			if (status == 0xffffffff) /* Chip failed or removed (CardBus). */
+		if (status &
+		    (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
+			if (status == EpicRemoved)
 				break;
 			/* Always update the error counts to avoid overhead later. */
 			ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
@@ -1133,11 +1204,6 @@
 				/* Restart the transmit process. */
 				outl(RestartTx, ioaddr + COMMAND);
 			}
-			if (status & RxOverflow) {		/* Missed a Rx frame. */
-				ep->stats.rx_errors++;
-			}
-			if (status & (RxOverflow | RxFull))
-				outw(RxQueued, ioaddr + COMMAND);
 			if (status & PCIBusErr170) {
 				printk(KERN_ERR "%s: PCI Bus Error!  EPIC status %4.4x.\n",
 					   dev->name, status);
@@ -1147,6 +1213,8 @@
 			/* Clear all error sources. */
 			outl(status & 0x7f18, ioaddr + INTSTAT);
 		}
+		if (!(status & EpicNormalEvent))
+			break;
 		if (--boguscnt < 0) {
 			printk(KERN_ERR "%s: Too much work at interrupt, "
 				   "IntrStatus=0x%8.8x.\n",
@@ -1164,7 +1232,7 @@
 	return IRQ_RETVAL(handled);
 }
 
-static int epic_rx(struct net_device *dev)
+static int epic_rx(struct net_device *dev, int budget)
 {
 	struct epic_private *ep = dev->priv;
 	int entry = ep->cur_rx % RX_RING_SIZE;
@@ -1174,6 +1242,10 @@
 	if (debug > 4)
 		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
 			   ep->rx_ring[entry].rxstatus);
+
+	if (rx_work_limit > budget)
+		rx_work_limit = budget;
+
 	/* If we own the next entry, it's a new packet. Send it up. */
 	while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
 		int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
@@ -1234,7 +1306,7 @@
 				ep->rx_skbuff[entry] = NULL;
 			}
 			skb->protocol = eth_type_trans(skb, dev);
-			netif_rx(skb);
+			netif_receive_skb(skb);
 			dev->last_rx = jiffies;
 			ep->stats.rx_packets++;
 			ep->stats.rx_bytes += pkt_len;
@@ -1262,6 +1334,61 @@
 	return work_done;
 }
 
+static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
+{
+	long ioaddr = dev->base_addr;
+	int status;
+
+	status = inl(ioaddr + INTSTAT);
+
+	if (status == EpicRemoved)
+		return;
+	if (status & RxOverflow) 	/* Missed a Rx frame. */
+		ep->stats.rx_errors++;
+	if (status & (RxOverflow | RxFull))
+		outw(RxQueued, ioaddr + COMMAND);
+}
+
+static int epic_poll(struct net_device *dev, int *budget)
+{
+	struct epic_private *ep = dev->priv;
+	int work_done, orig_budget;
+	long ioaddr = dev->base_addr;
+
+	orig_budget = (*budget > dev->quota) ? dev->quota : *budget;
+
+rx_action:
+
+	epic_tx(dev, ep);
+
+	work_done = epic_rx(dev, *budget);
+
+	epic_rx_err(dev, ep);
+
+	*budget -= work_done;
+	dev->quota -= work_done;
+
+	if (netif_running(dev) && (work_done < orig_budget)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&ep->napi_lock, flags);
+
+		if (ep->reschedule_in_poll) {
+			ep->reschedule_in_poll--;
+			spin_unlock_irqrestore(&ep->napi_lock, flags);
+			goto rx_action;
+		}
+
+		outl(EpicNapiEvent, ioaddr + INTSTAT);
+		epic_napi_irq_on(dev, ep);
+		__netif_rx_complete(dev);
+
+		spin_unlock_irqrestore(&ep->napi_lock, flags);
+	}
+
+	return (work_done >= orig_budget);
+}
+
 static int epic_close(struct net_device *dev)
 {
 	long ioaddr = dev->base_addr;
@@ -1276,9 +1403,13 @@
 			   dev->name, (int)inl(ioaddr + INTSTAT));
 
 	del_timer_sync(&ep->timer);
-	epic_pause(dev);
+
+	epic_disable_int(dev, ep);
+
 	free_irq(dev->irq, dev);
 
+	epic_pause(dev);
+
 	/* Free all the skbuffs in the Rx queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		skb = ep->rx_skbuff[i];
@@ -1476,6 +1607,7 @@
 #endif
 	pci_release_regions(pdev);
 	free_netdev(dev);
+	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 	/* pci_power_off(pdev, -1); */
 }
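#
# Editor's note: the epic100 hunks above convert the driver to the 2.6-era
# dev->poll ("old NAPI") interface. The sketch below shows the generic
# interrupt/poll handshake that conversion follows; the foo_* names and the
# empty helpers are placeholders for illustration, not code from epic100.c.
#
/*
 * Minimal old-NAPI skeleton, assuming a driver-private irq mask helper.
 * Registered at probe time with: dev->poll = foo_poll; dev->weight = 64;
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>

static void foo_irq_disable(struct net_device *dev) { /* mask NIC Rx irqs */ }
static void foo_irq_enable(struct net_device *dev) { /* unmask NIC Rx irqs */ }
static int foo_rx(struct net_device *dev, int quota) { return 0; /* frames done */ }

static irqreturn_t foo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;

	/* Schedule the softirq poll exactly once per burst of Rx work. */
	if (netif_rx_schedule_prep(dev)) {
		foo_irq_disable(dev);
		__netif_rx_schedule(dev);
	}
	return IRQ_HANDLED;
}

static int foo_poll(struct net_device *dev, int *budget)
{
	int quota = min(*budget, dev->quota);
	int done = foo_rx(dev, quota);

	*budget -= done;
	dev->quota -= done;

	if (done < quota) {
		/* Ring drained: leave polled mode, re-enable interrupts. */
		netif_rx_complete(dev);
		foo_irq_enable(dev);
		return 0;
	}
	return 1;	/* more work pending, stay on the poll list */
}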
diff -Nru a/drivers/net/r8169.c b/drivers/net/r8169.c
--- a/drivers/net/r8169.c	Tue Mar 30 20:12:21 2004
+++ b/drivers/net/r8169.c	Tue Mar 30 20:12:21 2004
@@ -56,9 +56,11 @@
 	        printk( "Assertion failed! %s,%s,%s,line=%d\n",	\
         	#expr,__FILE__,__FUNCTION__,__LINE__);		\
         }
+#define dprintk(fmt, args...)	do { printk(PFX fmt, ## args); } while (0)
 #else
 #define assert(expr) do {} while (0)
-#endif
+#define dprintk(fmt, args...)	do {} while (0)
+#endif /* RTL8169_DEBUG */
 
 /* media options */
 #define MAX_UNITS 8
@@ -89,9 +91,12 @@
 #define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
 #define NUM_RX_DESC	64	/* Number of Rx descriptor registers */
 #define RX_BUF_SIZE	1536	/* Rx Buffer size */
+#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
+#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))
 
 #define RTL_MIN_IO_SIZE 0x80
-#define TX_TIMEOUT  (6*HZ)
+#define RTL8169_TX_TIMEOUT	(6*HZ)
+#define RTL8169_PHY_TIMEOUT	(HZ) 
 
 /* write/read MMIO register */
 #define RTL_W8(reg, val8)	writeb ((val8), ioaddr + (reg))
@@ -101,11 +106,35 @@
 #define RTL_R16(reg)		readw (ioaddr + (reg))
 #define RTL_R32(reg)		((unsigned long) readl (ioaddr + (reg)))
 
-static struct {
+enum mac_version {
+	RTL_GIGA_MAC_VER_B = 0x00,
+	/* RTL_GIGA_MAC_VER_C = 0x03, */
+	RTL_GIGA_MAC_VER_D = 0x01,
+	RTL_GIGA_MAC_VER_E = 0x02
+};
+
+enum phy_version {
+	RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */
+	RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */
+	RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */
+	RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */
+	RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */
+};
+
+
+#define _R(NAME,MAC,MASK) \
+	{ .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
+
+const static struct {
 	const char *name;
-} board_info[] __devinitdata = {
-	{
-"RealTek RTL8169 Gigabit Ethernet"},};
+	u8 mac_version;
+	u32 RxConfigMask;	/* Clears the bits supported by this chip */
+} rtl_chip_info[] = {
+	_R("RTL8169",		RTL_GIGA_MAC_VER_B, 0xff7e1880),
+	_R("RTL8169s/8110s",	RTL_GIGA_MAC_VER_D, 0xff7e1880),
+	_R("RTL8169s/8110s",	RTL_GIGA_MAC_VER_E, 0xff7e1880)
+};
+#undef _R
 
 static struct pci_device_id rtl8169_pci_tbl[] = {
 	{0x10ec, 0x8169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -114,6 +143,8 @@
 
 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
 
+static int rx_copybreak = 200;
+
 enum RTL8169_registers {
 	MAC0 = 0,		/* Ethernet hardware address. */
 	MAR0 = 8,		/* Multicast filter. */
@@ -242,14 +273,6 @@
 	TBILinkOK = 0x02000000,
 };
 
-const static struct {
-	const char *name;
-	u8 version;		/* depend on RTL8169 docs */
-	u32 RxConfigMask;	/* should clear the bits supported by this chip */
-} rtl_chip_info[] = {
-	{
-"RTL-8169", 0x00, 0xff7e1880,},};
-
 enum _DescStatusBit {
 	OWNbit = 0x80000000,
 	EORbit = 0x40000000,
@@ -257,6 +280,8 @@
 	LSbit = 0x10000000,
 };
 
+#define RsvdMask	0x3fffc000
+
 struct TxDesc {
 	u32 status;
 	u32 vlan_tag;
@@ -277,28 +302,33 @@
 	struct net_device_stats stats;	/* statistics of net device */
 	spinlock_t lock;	/* spin lock flag */
 	int chipset;
-	unsigned long cur_rx;	/* Index into the Rx descriptor buffer of next Rx pkt. */
-	unsigned long cur_tx;	/* Index into the Tx descriptor buffer of next Rx pkt. */
-	unsigned long dirty_tx;
-	unsigned char *TxDescArrays;	/* Index of Tx Descriptor buffer */
-	unsigned char *RxDescArrays;	/* Index of Rx Descriptor buffer */
+	int mac_version;
+	int phy_version;
+	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
+	u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
+	u32 dirty_rx;
+	u32 dirty_tx;
 	struct TxDesc *TxDescArray;	/* Index of 256-alignment Tx Descriptor buffer */
 	struct RxDesc *RxDescArray;	/* Index of 256-alignment Rx Descriptor buffer */
-	unsigned char *RxBufferRings;	/* Index of Rx Buffer  */
-	unsigned char *RxBufferRing[NUM_RX_DESC];	/* Index of Rx Buffer array */
+	dma_addr_t TxPhyAddr;
+	dma_addr_t RxPhyAddr;
+	struct sk_buff *Rx_skbuff[NUM_RX_DESC];	/* Rx data buffers */
 	struct sk_buff *Tx_skbuff[NUM_TX_DESC];	/* Index of Transmit data buffer */
+	struct timer_list timer;
+	unsigned long phy_link_down_cnt;
 };
 
 MODULE_AUTHOR("Realtek");
 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
 MODULE_PARM(media, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
 MODULE_LICENSE("GPL");
 
 static int rtl8169_open(struct net_device *dev);
 static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance,
 			      struct pt_regs *regs);
-static void rtl8169_init_ring(struct net_device *dev);
+static int rtl8169_init_ring(struct net_device *dev);
 static void rtl8169_hw_start(struct net_device *dev);
 static int rtl8169_close(struct net_device *dev);
 static void rtl8169_set_rx_mode(struct net_device *dev);
@@ -306,11 +336,15 @@
 static struct net_device_stats *rtl8169_get_stats(struct net_device *netdev);
 
 static const u16 rtl8169_intr_mask =
-    SYSErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver | TxErr | TxOK |
-    RxErr | RxOK;
+    RxUnderrun | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
 static const unsigned int rtl8169_rx_config =
     (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
 
+#define PHY_Cap_10_Half_Or_Less PHY_Cap_10_Half
+#define PHY_Cap_10_Full_Or_Less PHY_Cap_10_Full | PHY_Cap_10_Half_Or_Less
+#define PHY_Cap_100_Half_Or_Less PHY_Cap_100_Half | PHY_Cap_10_Full_Or_Less
+#define PHY_Cap_100_Full_Or_Less PHY_Cap_100_Full | PHY_Cap_100_Half_Or_Less
+
 void
 mdio_write(void *ioaddr, int RegAddr, int value)
 {
@@ -342,13 +376,258 @@
 		if (RTL_R32(PHYAR) & 0x80000000) {
 			value = (int) (RTL_R32(PHYAR) & 0xFFFF);
 			break;
-		} else {
-			udelay(100);
 		}
+		udelay(100);
 	}
 	return value;
 }
 
+static void rtl8169_write_gmii_reg_bit(void *ioaddr, int reg, int bitnum,
+				       int bitval)
+{
+	int val;
+
+	val = mdio_read(ioaddr, reg);
+	val = (bitval == 1) ?
+		val | (bitval << bitnum) :  val & ~(0x0001 << bitnum);
+	mdio_write(ioaddr, reg, val & 0xffff); 
+}
+
+static void rtl8169_get_mac_version(struct rtl8169_private *tp, void *ioaddr)
+{
+	const struct {
+		u32 mask;
+		int mac_version;
+	} mac_info[] = {
+		{ 0x1 << 26,	RTL_GIGA_MAC_VER_E },
+		{ 0x1 << 23,	RTL_GIGA_MAC_VER_D }, 
+		{ 0x00000000,	RTL_GIGA_MAC_VER_B } /* Catch-all */
+	}, *p = mac_info;
+	u32 reg;
+
+	reg = RTL_R32(TxConfig) & 0x7c800000;
+	while ((reg & p->mask) != p->mask)
+		p++;
+	tp->mac_version = p->mac_version;
+}
+
+static void rtl8169_print_mac_version(struct rtl8169_private *tp)
+{
+	struct {
+		int version;
+		char *msg;
+	} mac_print[] = {
+		{ RTL_GIGA_MAC_VER_E, "RTL_GIGA_MAC_VER_E" },
+		{ RTL_GIGA_MAC_VER_D, "RTL_GIGA_MAC_VER_D" },
+		{ RTL_GIGA_MAC_VER_B, "RTL_GIGA_MAC_VER_B" },
+		{ 0, NULL }
+	}, *p;
+
+	for (p = mac_print; p->msg; p++) {
+		if (tp->mac_version == p->version) {
+			dprintk("mac_version == %s (%04d)\n", p->msg,
+				  p->version);
+			return;
+		}
+	}
+	dprintk("mac_version == Unknown\n");
+}
+
+static void rtl8169_get_phy_version(struct rtl8169_private *tp, void *ioaddr)
+{
+	const struct {
+		u16 mask;
+		u16 set;
+		int phy_version;
+	} phy_info[] = {
+		{ 0x000f, 0x0002, RTL_GIGA_PHY_VER_G },
+		{ 0x000f, 0x0001, RTL_GIGA_PHY_VER_F },
+		{ 0x000f, 0x0000, RTL_GIGA_PHY_VER_E },
+		{ 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */
+	}, *p = phy_info;
+	u16 reg;
+
+	reg = mdio_read(ioaddr, 3) & 0xffff;
+	while ((reg & p->mask) != p->set)
+		p++;
+	tp->phy_version = p->phy_version;
+}
+
+static void rtl8169_print_phy_version(struct rtl8169_private *tp)
+{
+	struct {
+		int version;
+		char *msg;
+		u32 reg;
+	} phy_print[] = {
+		{ RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 },
+		{ RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 },
+		{ RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 },
+		{ RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 },
+		{ 0, NULL, 0x0000 }
+	}, *p;
+
+	for (p = phy_print; p->msg; p++) {
+		if (tp->phy_version == p->version) {
+			dprintk("phy_version == %s (%04x)\n", p->msg, p->reg);
+			return;
+		}
+	}
+	dprintk("phy_version == Unknown\n");
+}
+
+static void rtl8169_hw_phy_config(struct net_device *dev)
+{
+	struct rtl8169_private *tp = dev->priv;
+	void *ioaddr = tp->mmio_addr;
+	struct {
+		u16 regs[5]; /* Beware of bit-sign propagation */
+	} phy_magic[5] = { {
+		{ 0x0000,	//w 4 15 12 0
+		  0x00a1,	//w 3 15 0 00a1
+		  0x0008,	//w 2 15 0 0008
+		  0x1020,	//w 1 15 0 1020
+		  0x1000 } },{	//w 0 15 0 1000
+		{ 0x7000,	//w 4 15 12 7
+		  0xff41,	//w 3 15 0 ff41
+		  0xde60,	//w 2 15 0 de60
+		  0x0140,	//w 1 15 0 0140
+		  0x0077 } },{	//w 0 15 0 0077
+		{ 0xa000,	//w 4 15 12 a
+		  0xdf01,	//w 3 15 0 df01
+		  0xdf20,	//w 2 15 0 df20
+		  0xff95,	//w 1 15 0 ff95
+		  0xfa00 } },{	//w 0 15 0 fa00
+		{ 0xb000,	//w 4 15 12 b
+		  0xff41,	//w 3 15 0 ff41
+		  0xde20,	//w 2 15 0 de20
+		  0x0140,	//w 1 15 0 0140
+		  0x00bb } },{	//w 0 15 0 00bb
+		{ 0xf000,	//w 4 15 12 f
+		  0xdf01,	//w 3 15 0 df01
+		  0xdf20,	//w 2 15 0 df20
+		  0xff95,	//w 1 15 0 ff95
+		  0xbf00 }	//w 0 15 0 bf00
+		}
+	}, *p = phy_magic;
+	int i;
+
+	rtl8169_print_mac_version(tp);
+	rtl8169_print_phy_version(tp);
+
+	if (tp->mac_version <= RTL_GIGA_MAC_VER_B)
+		return;
+	if (tp->phy_version >= RTL_GIGA_PHY_VER_F) 
+		return;
+
+	dprintk("MAC version != 0 && PHY version == 0 or 1\n");
+	dprintk("Do final_reg2.cfg\n");
+
+	/* Shazam ! */
+
+	// phy config for RTL8169s mac_version C chip
+	mdio_write(ioaddr, 31, 0x0001);			//w 31 2 0 1
+	mdio_write(ioaddr, 21, 0x1000);			//w 21 15 0 1000
+	mdio_write(ioaddr, 24, 0x65c7);			//w 24 15 0 65c7
+	rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0);	//w 4 11 11 0
+
+	for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) {
+		int val, pos = 4;
+
+		val = (mdio_read(ioaddr, pos) & 0x0fff) | (p->regs[0] & 0xffff);
+		mdio_write(ioaddr, pos, val);
+		while (--pos >= 0)
+			mdio_write(ioaddr, pos, p->regs[4 - pos] & 0xffff);
+		rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1
+		rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
+	}
+	mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0
+}
+
+static void rtl8169_hw_phy_reset(struct net_device *dev)
+{
+	struct rtl8169_private *tp = dev->priv;
+	void *ioaddr = tp->mmio_addr;
+	int i, val;
+
+	printk(KERN_WARNING PFX "%s: Reset RTL8169s PHY\n", dev->name);
+
+	val = (mdio_read(ioaddr, 0) | 0x8000) & 0xffff;
+	mdio_write(ioaddr, 0, val);
+
+	for (i = 50; i >= 0; i--) {
+		if (!(mdio_read(ioaddr, 0) & 0x8000))
+			break;
+		udelay(100); /* Gross */
+	}
+
+	if (i < 0) {
+		printk(KERN_WARNING PFX "%s: no PHY Reset ack. Giving up.\n",
+		       dev->name);
+	}
+}
+
+static void rtl8169_phy_timer(unsigned long __opaque)
+{
+	struct net_device *dev = (struct net_device *)__opaque;
+	struct rtl8169_private *tp = dev->priv;
+	struct timer_list *timer = &tp->timer;
+	void *ioaddr = tp->mmio_addr;
+
+	assert(tp->mac_version > RTL_GIGA_MAC_VER_B);
+	assert(tp->phy_version < RTL_GIGA_PHY_VER_G);
+
+	if (RTL_R8(PHYstatus) & LinkStatus)
+		tp->phy_link_down_cnt = 0;
+	else {
+		tp->phy_link_down_cnt++;
+		if (tp->phy_link_down_cnt >= 12) {
+			int reg;
+
+			// If link on 1000, perform phy reset.
+			reg = mdio_read(ioaddr, PHY_1000_CTRL_REG);
+			if (reg & PHY_Cap_1000_Full) 
+				rtl8169_hw_phy_reset(dev);
+
+			tp->phy_link_down_cnt = 0;
+		}
+	}
+
+	mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
+}
+
+static inline void rtl8169_delete_timer(struct net_device *dev)
+{
+	struct rtl8169_private *tp = dev->priv;
+	struct timer_list *timer = &tp->timer;
+
+	if ((tp->mac_version <= RTL_GIGA_MAC_VER_B) ||
+	    (tp->phy_version >= RTL_GIGA_PHY_VER_G))
+		return;
+
+	del_timer_sync(timer);
+
+	tp->phy_link_down_cnt = 0;
+}
+
+static inline void rtl8169_request_timer(struct net_device *dev)
+{
+	struct rtl8169_private *tp = dev->priv;
+	struct timer_list *timer = &tp->timer;
+
+	if ((tp->mac_version <= RTL_GIGA_MAC_VER_B) ||
+	    (tp->phy_version >= RTL_GIGA_PHY_VER_G))
+		return;
+
+	tp->phy_link_down_cnt = 0;
+
+	init_timer(timer);
+	timer->expires = jiffies + RTL8169_PHY_TIMEOUT;
+	timer->data = (unsigned long)(dev);
+	timer->function = rtl8169_phy_timer;
+	add_timer(timer);
+}
+
 static int __devinit
 rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
 		   void **ioaddr_out)
@@ -356,9 +635,9 @@
 	void *ioaddr = NULL;
 	struct net_device *dev;
 	struct rtl8169_private *tp;
-	int rc, i;
 	unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
-	u32 tmp;
+	int rc, i, acpi_idle_state = 0, pm_cap;
+
 
 	assert(pdev != NULL);
 	assert(ioaddr_out != NULL);
@@ -379,8 +658,22 @@
 
 	// enable device (incl. PCI PM wakeup and hotplug setup)
 	rc = pci_enable_device(pdev);
-	if (rc)
+	if (rc) {
+		printk(KERN_ERR PFX "%s: unable to enable device\n", pdev->slot_name);
 		goto err_out;
+	}
+
+	/* save power state before pci_enable_device overwrites it */
+	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (pm_cap) {
+		u16 pwr_command;
+
+		pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
+		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
+	} else {
+		printk(KERN_ERR PFX "Cannot find PowerManagement capability, aborting.\n");
+		goto err_out_free_res;
+	}
 
 	mmio_start = pci_resource_start(pdev, 1);
 	mmio_end = pci_resource_end(pdev, 1);
@@ -402,8 +695,10 @@
 	}
 
 	rc = pci_request_regions(pdev, dev->name);
-	if (rc)
+	if (rc) {
+		printk(KERN_ERR PFX "%s: Could not request regions.\n", pdev->slot_name);
 		goto err_out_disable;
+	}
 
 	// enable PCI bus-mastering
 	pci_set_master(pdev);
@@ -420,30 +715,32 @@
 	RTL_W8(ChipCmd, CmdReset);
 
 	// Check that the chip has finished the reset.
-	for (i = 1000; i > 0; i--)
+	for (i = 1000; i > 0; i--) {
 		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
 			break;
-		else
-			udelay(10);
+		udelay(10);
+	}
 
-	// identify chip attached to board
-	tmp = RTL_R32(TxConfig);
-	tmp = ((tmp & 0x7c000000) + ((tmp & 0x00800000) << 2)) >> 24;
-
-	for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--)
-		if (tmp == rtl_chip_info[i].version) {
-			tp->chipset = i;
-			goto match;
-		}
-	//if unknown chip, assume array element #0, original RTL-8169 in this case
-	printk(KERN_DEBUG PFX
-	       "PCI device %s: unknown chip version, assuming RTL-8169\n",
-	       pci_name(pdev));
-	printk(KERN_DEBUG PFX "PCI device %s: TxConfig = 0x%lx\n",
-	       pci_name(pdev), (unsigned long) RTL_R32(TxConfig));
-	tp->chipset = 0;
+	// Identify chip attached to board
+	rtl8169_get_mac_version(tp, ioaddr);
+	rtl8169_get_phy_version(tp, ioaddr);
+
+	rtl8169_print_mac_version(tp);
+	rtl8169_print_phy_version(tp);
+
+	for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) {
+		if (tp->mac_version == rtl_chip_info[i].mac_version)
+			break;
+	}
+	if (i < 0) {
+		/* Unknown chip: assume array element #0, original RTL-8169 */
+		printk(KERN_DEBUG PFX
+		       "PCI device %s: unknown chip version, assuming %s\n",
+		       pci_name(pdev), rtl_chip_info[0].name);
+		i++;
+	}
+	tp->chipset = i;
 
-match:
 	*ioaddr_out = ioaddr;
 	*dev_out = dev;
 	return 0;
@@ -499,7 +796,7 @@
 	dev->stop = rtl8169_close;
 	dev->tx_timeout = rtl8169_tx_timeout;
 	dev->set_multicast_list = rtl8169_set_rx_mode;
-	dev->watchdog_timeo = TX_TIMEOUT;
+	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
 	dev->irq = pdev->irq;
 	dev->base_addr = (unsigned long) ioaddr;
 //      dev->do_ioctl           = mii_ioctl;
@@ -528,12 +825,29 @@
 	       "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
 	       "IRQ %d\n",
 	       dev->name,
-	       board_info[ent->driver_data].name,
+	       rtl_chip_info[ent->driver_data].name,
 	       dev->base_addr,
 	       dev->dev_addr[0], dev->dev_addr[1],
 	       dev->dev_addr[2], dev->dev_addr[3],
 	       dev->dev_addr[4], dev->dev_addr[5], dev->irq);
 
+	rtl8169_hw_phy_config(dev);
+
+	dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+	RTL_W8(0x82, 0x01);
+
+	if (tp->mac_version < RTL_GIGA_MAC_VER_E) {
+		dprintk("Set PCI Latency=0x40\n");
+		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+	}
+
+	if (tp->mac_version == RTL_GIGA_MAC_VER_D) {
+		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+		RTL_W8(0x82, 0x01);
+		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
+		mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
+	}
+
 	// if TBI is not endbled
 	if (!(RTL_R8(PHYstatus) & TBI_Enable)) {
 		int val = mdio_read(ioaddr, PHY_AUTO_NEGO_REG);
@@ -546,23 +860,23 @@
 			Cap10_100 = 0, Cap1000 = 0;
 			switch (option) {
 			case _10_Half:
-				Cap10_100 = PHY_Cap_10_Half;
+				Cap10_100 = PHY_Cap_10_Half_Or_Less;
 				Cap1000 = PHY_Cap_Null;
 				break;
 			case _10_Full:
-				Cap10_100 = PHY_Cap_10_Full;
+				Cap10_100 = PHY_Cap_10_Full_Or_Less;
 				Cap1000 = PHY_Cap_Null;
 				break;
 			case _100_Half:
-				Cap10_100 = PHY_Cap_100_Half;
+				Cap10_100 = PHY_Cap_100_Half_Or_Less;
 				Cap1000 = PHY_Cap_Null;
 				break;
 			case _100_Full:
-				Cap10_100 = PHY_Cap_100_Full;
+				Cap10_100 = PHY_Cap_100_Full_Or_Less;
 				Cap1000 = PHY_Cap_Null;
 				break;
 			case _1000_Full:
-				Cap10_100 = PHY_Cap_Null;
+				Cap10_100 = PHY_Cap_100_Full_Or_Less;
 				Cap1000 = PHY_Cap_1000_Full;
 				break;
 			default:
@@ -576,9 +890,7 @@
 
 			// enable 10/100 Full/Half Mode, leave PHY_AUTO_NEGO_REG bit4:0 unchanged
 			mdio_write(ioaddr, PHY_AUTO_NEGO_REG,
-				   PHY_Cap_10_Half | PHY_Cap_10_Full |
-				   PHY_Cap_100_Half | PHY_Cap_100_Full | (val &
-									  0x1F));
+				   PHY_Cap_100_Full_Or_Less | (val & 0x1f));
 
 			// enable 1000 Full Mode
 			mdio_write(ioaddr, PHY_1000_CTRL_REG,
@@ -647,56 +959,96 @@
 	pci_set_drvdata(pdev, NULL);
 }
 
+#ifdef CONFIG_PM
+
+static int rtl8169_suspend(struct pci_dev *pdev, u32 state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct rtl8169_private *tp = dev->priv;
+	void *ioaddr = tp->mmio_addr;
+	unsigned long flags;
+
+	if (!netif_running(dev))
+		return 0;
+	
+	netif_device_detach(dev);
+	netif_stop_queue(dev);
+	spin_lock_irqsave(&tp->lock, flags);
+
+	/* Disable interrupts, stop Rx and Tx */
+	RTL_W16(IntrMask, 0);
+	RTL_W8(ChipCmd, 0);
+		
+	/* Update the error counts. */
+	tp->stats.rx_missed_errors += RTL_R32(RxMissed);
+	RTL_W32(RxMissed, 0);
+	spin_unlock_irqrestore(&tp->lock, flags);
+	
+	return 0;
+}
+
+static int rtl8169_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	if (!netif_running(dev))
+	    return 0;
+
+	netif_device_attach(dev);
+	rtl8169_hw_start(dev);
+
+	return 0;
+}
+
+#endif /* CONFIG_PM */
+
 static int
 rtl8169_open(struct net_device *dev)
 {
 	struct rtl8169_private *tp = dev->priv;
+	struct pci_dev *pdev = tp->pci_dev;
 	int retval;
-	u8 diff;
-	u32 TxPhyAddr, RxPhyAddr;
 
 	retval =
 	    request_irq(dev->irq, rtl8169_interrupt, SA_SHIRQ, dev->name, dev);
-	if (retval) {
-		return retval;
-	}
+	if (retval < 0)
+		goto out;
 
-	tp->TxDescArrays =
-	    kmalloc(NUM_TX_DESC * sizeof (struct TxDesc) + 256, GFP_KERNEL);
-	// Tx Desscriptor needs 256 bytes alignment;
-	TxPhyAddr = virt_to_bus(tp->TxDescArrays);
-	diff = 256 - (TxPhyAddr - ((TxPhyAddr >> 8) << 8));
-	TxPhyAddr += diff;
-	tp->TxDescArray = (struct TxDesc *) (tp->TxDescArrays + diff);
-
-	tp->RxDescArrays =
-	    kmalloc(NUM_RX_DESC * sizeof (struct RxDesc) + 256, GFP_KERNEL);
-	// Rx Desscriptor needs 256 bytes alignment;
-	RxPhyAddr = virt_to_bus(tp->RxDescArrays);
-	diff = 256 - (RxPhyAddr - ((RxPhyAddr >> 8) << 8));
-	RxPhyAddr += diff;
-	tp->RxDescArray = (struct RxDesc *) (tp->RxDescArrays + diff);
+	retval = -ENOMEM;
 
-	if (tp->TxDescArrays == NULL || tp->RxDescArrays == NULL) {
-		printk(KERN_INFO
-		       "Allocate RxDescArray or TxDescArray failed\n");
-		free_irq(dev->irq, dev);
-		if (tp->TxDescArrays)
-			kfree(tp->TxDescArrays);
-		if (tp->RxDescArrays)
-			kfree(tp->RxDescArrays);
-		return -ENOMEM;
-	}
-	tp->RxBufferRings = kmalloc(RX_BUF_SIZE * NUM_RX_DESC, GFP_KERNEL);
-	if (tp->RxBufferRings == NULL) {
-		printk(KERN_INFO "Allocate RxBufferRing failed\n");
-	}
+	/*
+	 * Rx and Tx descriptors need 256 bytes alignment.
+	 * pci_alloc_consistent provides more.
+	 */
+	tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
+					       &tp->TxPhyAddr);
+	if (!tp->TxDescArray)
+		goto err_free_irq;
+
+	tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
+					       &tp->RxPhyAddr);
+	if (!tp->RxDescArray)
+		goto err_free_tx;
+
+	retval = rtl8169_init_ring(dev);
+	if (retval < 0)
+		goto err_free_rx;
 
-	rtl8169_init_ring(dev);
 	rtl8169_hw_start(dev);
 
-	return 0;
-
+	rtl8169_request_timer(dev);
+out:
+	return retval;
+
+err_free_rx:
+	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
+			    tp->RxPhyAddr);
+err_free_tx:
+	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
+			    tp->TxPhyAddr);
+err_free_irq:
+	free_irq(dev->irq, dev);
+	goto out;
 }
 
 static void
@@ -733,11 +1085,17 @@
 	RTL_W32(TxConfig,
 		(TX_DMA_BURST << TxDMAShift) | (InterFrameGap <<
 						TxInterFrameGapShift));
+	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd));
+
+	if (tp->mac_version == RTL_GIGA_MAC_VER_D) {
+		dprintk("Set MAC Reg C+CR Offset 0xE0: bit-3 and bit-14 MUST be 1\n");
+		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | (1 << 14) | (1 << 3));
+	}
 
 	tp->cur_rx = 0;
 
-	RTL_W32(TxDescStartAddr, virt_to_bus(tp->TxDescArray));
-	RTL_W32(RxDescStartAddr, virt_to_bus(tp->RxDescArray));
+	RTL_W32(TxDescStartAddr, tp->TxPhyAddr);
+	RTL_W32(RxDescStartAddr, tp->RxPhyAddr);
 	RTL_W8(Cfg9346, Cfg9346_Lock);
 	udelay(10);
 
@@ -755,31 +1113,131 @@
 
 }
 
-static void
-rtl8169_init_ring(struct net_device *dev)
+static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
+{
+	desc->buf_addr = 0xdeadbeef;
+	desc->status &= ~cpu_to_le32(OWNbit | RsvdMask);
+}
+
+static void rtl8169_free_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
+				struct RxDesc *desc)
+{
+	pci_unmap_single(pdev, le32_to_cpu(desc->buf_addr), RX_BUF_SIZE,
+			 PCI_DMA_FROMDEVICE);
+	dev_kfree_skb(*sk_buff);
+	*sk_buff = NULL;
+	rtl8169_make_unusable_by_asic(desc);
+}
+
+static inline void rtl8169_return_to_asic(struct RxDesc *desc)
+{
+	desc->status |= cpu_to_le32(OWNbit + RX_BUF_SIZE);
+}
+
+static inline void rtl8169_give_to_asic(struct RxDesc *desc, dma_addr_t mapping)
+{
+	desc->buf_addr = cpu_to_le32(mapping);
+	desc->status |= cpu_to_le32(OWNbit + RX_BUF_SIZE);
+}
+
+static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct net_device *dev,
+				struct sk_buff **sk_buff, struct RxDesc *desc)
+{
+	struct sk_buff *skb;
+	dma_addr_t mapping;
+	int ret = 0;
+
+	skb = dev_alloc_skb(RX_BUF_SIZE);
+	if (!skb)
+		goto err_out;
+
+	skb->dev = dev;
+	skb_reserve(skb, 2);
+	*sk_buff = skb;
+
+	mapping = pci_map_single(pdev, skb->tail, RX_BUF_SIZE,
+				 PCI_DMA_FROMDEVICE);
+
+	rtl8169_give_to_asic(desc, mapping);
+
+out:
+	return ret;
+
+err_out:
+	ret = -ENOMEM;
+	rtl8169_make_unusable_by_asic(desc);
+	goto out;
+}
+
+static void rtl8169_rx_clear(struct rtl8169_private *tp)
 {
-	struct rtl8169_private *tp = dev->priv;
 	int i;
 
-	tp->cur_rx = 0;
-	tp->cur_tx = 0;
-	tp->dirty_tx = 0;
+	for (i = 0; i < NUM_RX_DESC; i++) {
+		if (tp->Rx_skbuff[i]) {
+			rtl8169_free_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
+					    tp->RxDescArray + i);
+		}
+	}
+}
+
+static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
+			   u32 start, u32 end)
+{
+	u32 cur;
+	
+	for (cur = start; end - cur > 0; cur++) {
+		int ret, i = cur % NUM_RX_DESC;
+
+		if (tp->Rx_skbuff[i])
+			continue;
+			
+		ret = rtl8169_alloc_rx_skb(tp->pci_dev, dev, tp->Rx_skbuff + i,
+					   tp->RxDescArray + i);
+		if (ret < 0)
+			break;
+	}
+	return cur - start;
+}
+
+static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
+{
+	desc->status |= cpu_to_le32(EORbit);
+}
+
+static int rtl8169_init_ring(struct net_device *dev)
+{
+	struct rtl8169_private *tp = dev->priv;
+
+	tp->cur_rx = tp->dirty_rx = 0;
+	tp->cur_tx = tp->dirty_tx = 0;
 	memset(tp->TxDescArray, 0x0, NUM_TX_DESC * sizeof (struct TxDesc));
 	memset(tp->RxDescArray, 0x0, NUM_RX_DESC * sizeof (struct RxDesc));
 
-	for (i = 0; i < NUM_TX_DESC; i++) {
-		tp->Tx_skbuff[i] = NULL;
-	}
-	for (i = 0; i < NUM_RX_DESC; i++) {
-		if (i == (NUM_RX_DESC - 1))
-			tp->RxDescArray[i].status =
-			    (OWNbit | EORbit) + RX_BUF_SIZE;
-		else
-			tp->RxDescArray[i].status = OWNbit + RX_BUF_SIZE;
+	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
+	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
 
-		tp->RxBufferRing[i] = &(tp->RxBufferRings[i * RX_BUF_SIZE]);
-		tp->RxDescArray[i].buf_addr = virt_to_bus(tp->RxBufferRing[i]);
-	}
+	if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
+		goto err_out;
+
+	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
+
+	return 0;
+
+err_out:
+	rtl8169_rx_clear(tp);
+	return -ENOMEM;
+}
+
+static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
+				 struct TxDesc *desc)
+{
+	u32 len = sk_buff[0]->len;
+
+	pci_unmap_single(pdev, le32_to_cpu(desc->buf_addr),
+			 len < ETH_ZLEN ? ETH_ZLEN : len, PCI_DMA_TODEVICE);
+	desc->buf_addr = 0x00;
+	*sk_buff = NULL;
 }
 
 static void
@@ -789,9 +1247,12 @@
 
 	tp->cur_tx = 0;
 	for (i = 0; i < NUM_TX_DESC; i++) {
-		if (tp->Tx_skbuff[i] != NULL) {
-			dev_kfree_skb(tp->Tx_skbuff[i]);
-			tp->Tx_skbuff[i] = NULL;
+		struct sk_buff *skb = tp->Tx_skbuff[i];
+
+		if (skb) {
+			rtl8169_unmap_tx_skb(tp->pci_dev, tp->Tx_skbuff + i,
+					     tp->TxDescArray + i);
+			dev_kfree_skb(skb);
 			tp->stats.tx_dropped++;
 		}
 	}
@@ -829,48 +1290,58 @@
 	struct rtl8169_private *tp = dev->priv;
 	void *ioaddr = tp->mmio_addr;
 	int entry = tp->cur_tx % NUM_TX_DESC;
+	u32 len = skb->len;
 
-	if (skb->len < ETH_ZLEN) {
+	if (unlikely(skb->len < ETH_ZLEN)) {
 		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL)
-			return 0;
+		if (!skb)
+			goto err_update_stats;
+		len = ETH_ZLEN;
 	}
 	
 	spin_lock_irq(&tp->lock);
 
-	if ((tp->TxDescArray[entry].status & OWNbit) == 0) {
+	if (!(le32_to_cpu(tp->TxDescArray[entry].status) & OWNbit)) {
+		dma_addr_t mapping;
+
+		mapping = pci_map_single(tp->pci_dev, skb->data, len,
+					 PCI_DMA_TODEVICE);
+
 		tp->Tx_skbuff[entry] = skb;
-		tp->TxDescArray[entry].buf_addr = virt_to_bus(skb->data);
-		if (entry != (NUM_TX_DESC - 1))
-			tp->TxDescArray[entry].status =
-			    (OWNbit | FSbit | LSbit) | ((skb->len > ETH_ZLEN) ?
-							skb->len : ETH_ZLEN);
-		else
-			tp->TxDescArray[entry].status =
-			    (OWNbit | EORbit | FSbit | LSbit) |
-			    ((skb->len > ETH_ZLEN) ? skb->len : ETH_ZLEN);
+		tp->TxDescArray[entry].buf_addr = cpu_to_le32(mapping);
 
+		tp->TxDescArray[entry].status = cpu_to_le32(OWNbit | FSbit |
+			LSbit | len | (EORbit * !((entry + 1) % NUM_TX_DESC)));
+			
 		RTL_W8(TxPoll, 0x40);	//set polling bit
 
 		dev->trans_start = jiffies;
 
 		tp->cur_tx++;
-	}
+	} else
+		goto err_drop;
 
-	spin_unlock_irq(&tp->lock);
 
 	if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) {
 		netif_stop_queue(dev);
 	}
+out:
+	spin_unlock_irq(&tp->lock);
 
 	return 0;
+
+err_drop:
+	dev_kfree_skb(skb);
+err_update_stats:
+	tp->stats.tx_dropped++;
+	goto out;
 }
 
 static void
 rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
 		     void *ioaddr)
 {
-	unsigned long dirty_tx, tx_left = 0;
+	unsigned long dirty_tx, tx_left;
 
 	assert(dev != NULL);
 	assert(tp != NULL);
@@ -882,12 +1353,15 @@
 	while (tx_left > 0) {
 		int entry = dirty_tx % NUM_TX_DESC;
 
-		if ((tp->TxDescArray[entry].status & OWNbit) == 0) {
+		if (!(le32_to_cpu(tp->TxDescArray[entry].status) & OWNbit)) {
 			struct sk_buff *skb = tp->Tx_skbuff[entry];
 
+			/* FIXME: is it really accurate for TxErr ? */
 			tp->stats.tx_bytes += skb->len >= ETH_ZLEN ?
 					      skb->len : ETH_ZLEN;
 			tp->stats.tx_packets++;
+			rtl8169_unmap_tx_skb(tp->pci_dev, tp->Tx_skbuff + entry,
+					     tp->TxDescArray + entry);
 			dev_kfree_skb_irq(skb);
 			tp->Tx_skbuff[entry] = NULL;
 			dirty_tx++;
@@ -902,70 +1376,102 @@
 	}
 }
 
+static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
+				      struct RxDesc *desc,
+				      struct net_device *dev)
+{
+	int ret = -1;
+
+	if (pkt_size < rx_copybreak) {
+		struct sk_buff *skb;
+
+		skb = dev_alloc_skb(pkt_size + 2);
+		if (skb) {
+			skb->dev = dev;
+			skb_reserve(skb, 2);
+			eth_copy_and_sum(skb, sk_buff[0]->tail, pkt_size, 0);
+			*sk_buff = skb;
+			rtl8169_return_to_asic(desc);
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
 static void
 rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
 		     void *ioaddr)
 {
-	int cur_rx;
-	struct sk_buff *skb;
-	int pkt_size = 0;
+	unsigned long cur_rx, rx_left;
+	int delta;
 
 	assert(dev != NULL);
 	assert(tp != NULL);
 	assert(ioaddr != NULL);
 
 	cur_rx = tp->cur_rx;
+	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
 
-	while ((tp->RxDescArray[cur_rx].status & OWNbit) == 0) {
+	while (rx_left > 0) {
+		int entry = cur_rx % NUM_RX_DESC;
+		u32 status = le32_to_cpu(tp->RxDescArray[entry].status);
 
-		if (tp->RxDescArray[cur_rx].status & RxRES) {
+		if (status & OWNbit)
+			break;
+		if (status & RxRES) {
 			printk(KERN_INFO "%s: Rx ERROR!!!\n", dev->name);
 			tp->stats.rx_errors++;
-			if (tp->RxDescArray[cur_rx].status & (RxRWT | RxRUNT))
+			if (status & (RxRWT | RxRUNT))
 				tp->stats.rx_length_errors++;
-			if (tp->RxDescArray[cur_rx].status & RxCRC)
+			if (status & RxCRC)
 				tp->stats.rx_crc_errors++;
 		} else {
-			pkt_size =
-			    (int) (tp->RxDescArray[cur_rx].
-				   status & 0x00001FFF) - 4;
-			skb = dev_alloc_skb(pkt_size + 2);
-			if (skb != NULL) {
-				skb->dev = dev;
-				skb_reserve(skb, 2);	// 16 byte align the IP fields. //
-				eth_copy_and_sum(skb, tp->RxBufferRing[cur_rx],
-						 pkt_size, 0);
-				skb_put(skb, pkt_size);
-				skb->protocol = eth_type_trans(skb, dev);
-				netif_rx(skb);
-
-				if (cur_rx == (NUM_RX_DESC - 1))
-					tp->RxDescArray[cur_rx].status =
-					    (OWNbit | EORbit) + RX_BUF_SIZE;
-				else
-					tp->RxDescArray[cur_rx].status =
-					    OWNbit + RX_BUF_SIZE;
-
-				tp->RxDescArray[cur_rx].buf_addr =
-				    virt_to_bus(tp->RxBufferRing[cur_rx]);
-				dev->last_rx = jiffies;
-				tp->stats.rx_bytes += pkt_size;
-				tp->stats.rx_packets++;
-			} else {
-				printk(KERN_WARNING
-				       "%s: Memory squeeze, deferring packet.\n",
-				       dev->name);
-				/* We should check that some rx space is free.
-				   If not, free one and mark stats->rx_dropped++. */
-				tp->stats.rx_dropped++;
+			struct RxDesc *desc = tp->RxDescArray + entry;
+			struct sk_buff *skb = tp->Rx_skbuff[entry];
+			int pkt_size = (status & 0x00001FFF) - 4;
+
+			pci_dma_sync_single(tp->pci_dev,
+					    le32_to_cpu(desc->buf_addr),
+					    RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+
+			if (rtl8169_try_rx_copy(&skb, pkt_size, desc, dev)) {
+				pci_unmap_single(tp->pci_dev,
+						 le32_to_cpu(desc->buf_addr),
+						 RX_BUF_SIZE,
+						 PCI_DMA_FROMDEVICE);
+				tp->Rx_skbuff[entry] = NULL;
 			}
-		}
-
-		cur_rx = (cur_rx + 1) % NUM_RX_DESC;
 
+			skb_put(skb, pkt_size);
+			skb->protocol = eth_type_trans(skb, dev);
+			netif_rx(skb);
+
+			dev->last_rx = jiffies;
+			tp->stats.rx_bytes += pkt_size;
+			tp->stats.rx_packets++;
+		}
+		
+		cur_rx++; 
+		rx_left--;
 	}
 
 	tp->cur_rx = cur_rx;
+
+	delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
+	if (delta > 0)
+		tp->dirty_rx += delta;
+	else if (delta < 0)
+		printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
+
+	/*
+	 * FIXME: until there is periodic timer to try and refill the ring,
+	 * a temporary shortage may definitely kill the Rx process.
+	 * - disable the asic to try and avoid an overflow and kick it again
+	 *   after refill ?
+	 * - how do others driver handle this condition (Uh oh...).
+	 */
+	if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
+		printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
 }
 
 /* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
@@ -994,9 +1500,7 @@
 		RTL_W16(IntrStatus,
 			(status & RxFIFOOver) ? (status | RxOverflow) : status);
 
-		if ((status &
-		     (SYSErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver
-		      | TxErr | TxOK | RxErr | RxOK)) == 0)
+		if (!(status & rtl8169_intr_mask))
 			break;
 
 		// Rx interrupt 
@@ -1026,11 +1530,13 @@
 rtl8169_close(struct net_device *dev)
 {
 	struct rtl8169_private *tp = dev->priv;
+	struct pci_dev *pdev = tp->pci_dev;
 	void *ioaddr = tp->mmio_addr;
-	int i;
 
 	netif_stop_queue(dev);
 
+	rtl8169_delete_timer(dev);
+
 	spin_lock_irq(&tp->lock);
 
 	/* Stop the chip's Tx and Rx DMA processes. */
@@ -1049,16 +1555,15 @@
 	free_irq(dev->irq, dev);
 
 	rtl8169_tx_clear(tp);
-	kfree(tp->TxDescArrays);
-	kfree(tp->RxDescArrays);
-	tp->TxDescArrays = NULL;
-	tp->RxDescArrays = NULL;
+
+	rtl8169_rx_clear(tp);
+
+	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
+			    tp->RxPhyAddr);
+	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
+			    tp->TxPhyAddr);
 	tp->TxDescArray = NULL;
 	tp->RxDescArray = NULL;
-	kfree(tp->RxBufferRings);
-	for (i = 0; i < NUM_RX_DESC; i++) {
-		tp->RxBufferRing[i] = NULL;
-	}
 
 	return 0;
 }
@@ -1112,11 +1617,26 @@
 	spin_unlock_irqrestore(&tp->lock, flags);
 }
 
+/**
+ *  rtl8169_get_stats - Get rtl8169 read/write statistics
+ *  @dev: The Ethernet Device to get statistics for
+ *
+ *  Get TX/RX statistics for rtl8169
+ */
 struct net_device_stats *
 rtl8169_get_stats(struct net_device *dev)
 {
 	struct rtl8169_private *tp = dev->priv;
+	void *ioaddr = tp->mmio_addr;
+	unsigned long flags;
 
+	if (netif_running(dev)) {
+		spin_lock_irqsave(&tp->lock, flags);
+		tp->stats.rx_missed_errors += RTL_R32(RxMissed);
+		RTL_W32(RxMissed, 0);
+		spin_unlock_irqrestore(&tp->lock, flags);
+	}
+		
 	return &tp->stats;
 }
 
@@ -1125,8 +1645,10 @@
 	.id_table	= rtl8169_pci_tbl,
 	.probe		= rtl8169_init_one,
 	.remove		= __devexit_p(rtl8169_remove_one),
-	.suspend	= NULL,
-	.resume		= NULL,
+#ifdef CONFIG_PM
+	.suspend	= rtl8169_suspend,
+	.resume		= rtl8169_resume,
+#endif
 };
 
 static int __init
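#
# Editor's note: both the r8169 and sis190 hunks in this patch introduce an
# rx_copybreak module parameter. The sketch below distils the decision their
# *_try_rx_copy() helpers make -- copy small frames into a fresh skb so the
# DMA-mapped ring buffer can be handed back to the NIC, otherwise consume the
# ring skb and leave the slot to be refilled. DMA sync/unmap is omitted and
# the copybreak_rx_one() name is a placeholder, not driver code.
#
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/jiffies.h>

static int rx_copybreak = 200;	/* same default as the patches above */

static void copybreak_rx_one(struct net_device *dev, struct sk_buff **ring_skb,
			     int pkt_size)
{
	struct sk_buff *skb = *ring_skb;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *copy = dev_alloc_skb(pkt_size + 2);

		if (copy) {
			copy->dev = dev;
			skb_reserve(copy, 2);	/* align the IP header */
			eth_copy_and_sum(copy, skb->tail, pkt_size, 0);
			skb = copy;		/* ring buffer stays with the NIC */
		}
		/* on allocation failure, fall back to consuming the ring skb */
	}
	if (skb == *ring_skb)
		*ring_skb = NULL;		/* slot must be refilled later */

	skb_put(skb, pkt_size);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	dev->last_rx = jiffies;
}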
diff -Nru a/drivers/net/sis190.c b/drivers/net/sis190.c
--- a/drivers/net/sis190.c	Tue Mar 30 20:12:21 2004
+++ b/drivers/net/sis190.c	Tue Mar 30 20:12:21 2004
@@ -149,6 +149,8 @@
 
 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
 
+static int rx_copybreak = 200;
+
 enum SiS190_registers {
 	TxControl		= 0x0,
 	TxDescStartAddr		= 0x4,
@@ -290,6 +292,9 @@
 	ENDbit			= 0x80000000,
 };
 
+/* FIXME: datasheet, anyone ? */
+#define RsvdMask		0x00000000
+
 struct TxDesc {
 	u32 PSize;
 	u32 status;
@@ -310,28 +315,29 @@
 	struct net_device_stats stats;	/* statistics of net device */
 	spinlock_t lock;	/* spin lock flag */
 	int chipset;
-	unsigned long cur_rx;	/* Index into the Rx descriptor buffer of next Rx pkt. */
-	unsigned long cur_tx;	/* Index into the Tx descriptor buffer of next Rx pkt. */
-	unsigned long dirty_tx;
+	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
+	u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
+	u32 dirty_rx;
+	u32 dirty_tx;
 	dma_addr_t tx_dma;
 	dma_addr_t rx_dma;
 	struct TxDesc *TxDescArray;	/* Index of 256-alignment Tx Descriptor buffer */
 	struct RxDesc *RxDescArray;	/* Index of 256-alignment Rx Descriptor buffer */
-	unsigned char *RxBufferRings;	/* Index of Rx Buffer  */
-	unsigned char *RxBufferRing[NUM_RX_DESC];	/* Index of Rx Buffer array */
-	struct sk_buff *Tx_skbuff[NUM_TX_DESC];	/* Index of Transmit data buffer */
+	struct sk_buff *Rx_skbuff[NUM_RX_DESC];	/* Rx data buffers */
+	struct sk_buff *Tx_skbuff[NUM_TX_DESC];	/* Tx data buffer */
 };
 
 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>");
 MODULE_DESCRIPTION("SiS SiS190 Gigabit Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_PARM(media, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
 
 static int SiS190_open(struct net_device *dev);
 static int SiS190_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t SiS190_interrupt(int irq, void *dev_instance,
 				    struct pt_regs *regs);
-static void SiS190_init_ring(struct net_device *dev);
+static int SiS190_init_ring(struct net_device *dev);
 static void SiS190_hw_start(struct net_device *dev);
 static int SiS190_close(struct net_device *dev);
 static void SiS190_set_rx_mode(struct net_device *dev);
@@ -435,6 +441,18 @@
 	return data;
 }
 
+static void SiS190_release_board(struct pci_dev *pdev, struct net_device *dev)
+{
+	struct sis190_private *tp = dev->priv;
+
+	assert(tp != NULL);
+
+	iounmap(tp->mmio_addr);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	free_netdev(dev);
+}
+
 static int __devinit
 SiS190_init_board(struct pci_dev *pdev, struct net_device **dev_out,
 		  void **ioaddr_out)
@@ -599,10 +617,7 @@
 	spin_lock_init(&tp->lock);
 	rc = register_netdev(dev);
 	if (rc) {
-		iounmap(ioaddr);
-		pci_release_regions(pdev);
-		pci_disable_device(pdev);
-		free_netdev(dev);
+		SiS190_release_board(pdev, dev);
 		return rc;
 	}
 
@@ -694,16 +709,13 @@
 SiS190_remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
-	struct sis190_private *tp = (struct sis190_private *) (dev->priv);
 
 	assert(dev != NULL);
-	assert(tp != NULL);
 
 	unregister_netdev(dev);
-	iounmap(tp->mmio_addr);
-	pci_release_regions(pdev);
 
-	free_netdev(dev);
+	SiS190_release_board(pdev, dev);
+
 	pci_set_drvdata(pdev, NULL);
 }
 
@@ -714,50 +726,43 @@
 	int rc;
 
 	rc = request_irq(dev->irq, SiS190_interrupt, SA_SHIRQ, dev->name, dev);
-	if (rc)
+	if (rc < 0)
 		goto out;
 
+	rc = -ENOMEM;
+
 	/*
 	 * Rx and Tx descriptors need 256 bytes alignment.
 	 * pci_alloc_consistent() guarantees a stronger alignment.
 	 */
 	tp->TxDescArray = pci_alloc_consistent(tp->pci_dev, TX_DESC_TOTAL_SIZE,
 		&tp->tx_dma);
-	if (!tp->TxDescArray) {
-		rc = -ENOMEM;
-		goto err_out;
-	}
+	if (!tp->TxDescArray)
+		goto err_free_irq;
 
 	tp->RxDescArray = pci_alloc_consistent(tp->pci_dev, RX_DESC_TOTAL_SIZE,
 		&tp->rx_dma);
-	if (!tp->RxDescArray) {
-		rc = -ENOMEM;
-		goto err_out_free_tx;
-	}
+	if (!tp->RxDescArray)
+		goto err_free_tx;
 
-	tp->RxBufferRings = kmalloc(RX_BUF_SIZE * NUM_RX_DESC, GFP_KERNEL);
-	if (tp->RxBufferRings == NULL) {
-		printk(KERN_INFO "%s: allocate RxBufferRing failed\n",
-			dev->name);
-		rc = -ENOMEM;
-		goto err_out_free_rx;
-	}
+	rc = SiS190_init_ring(dev);
+	if (rc < 0)
+		goto err_free_rx;
 
-	SiS190_init_ring(dev);
 	SiS190_hw_start(dev);
 
 out:
 	return rc;
 
-err_out_free_rx:
+err_free_rx:
 	pci_free_consistent(tp->pci_dev, RX_DESC_TOTAL_SIZE, tp->RxDescArray,
 		tp->rx_dma);
-err_out_free_tx:
+err_free_tx:
 	pci_free_consistent(tp->pci_dev, TX_DESC_TOTAL_SIZE, tp->TxDescArray,
 		tp->tx_dma);
-err_out:
+err_free_irq:
 	free_irq(dev->irq, dev);
-	return rc;
+	goto out;
 }
 
 static void
@@ -807,37 +812,125 @@
 
 }
 
-static void
-SiS190_init_ring(struct net_device *dev)
+static inline void sis190_mark_as_last_descriptor(struct RxDesc *desc)
+{
+	desc->buf_Len |= cpu_to_le32(ENDbit);
+}
+
+static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
+{
+	desc->buf_addr = 0xdeadbeef;
+	desc->status &= ~cpu_to_le32(OWNbit | RsvdMask);
+}
+
+static void sis190_free_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
+			       struct RxDesc *desc)
+{
+	pci_unmap_single(pdev, le32_to_cpu(desc->buf_addr), RX_BUF_SIZE,
+			 PCI_DMA_FROMDEVICE);
+	dev_kfree_skb(*sk_buff);
+	*sk_buff = NULL;
+	sis190_make_unusable_by_asic(desc);
+}
+
+
+static inline void sis190_return_to_asic(struct RxDesc *desc)
+{
+	desc->PSize = 0x0;
+	desc->status |= cpu_to_le32(OWNbit | INTbit);
+}
+
+static inline void sis190_give_to_asic(struct RxDesc *desc, dma_addr_t mapping)
+{
+	desc->buf_addr = cpu_to_le32(mapping);
+	desc->status |= cpu_to_le32(OWNbit | INTbit);
+}
+
+static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct net_device *dev,
+			       struct sk_buff **sk_buff, struct RxDesc *desc)
+{
+	struct sk_buff *skb;
+	dma_addr_t mapping;
+	int ret = 0;
+
+	skb = dev_alloc_skb(RX_BUF_SIZE + 2);
+	if (!skb)
+		goto err_out;
+
+	skb->dev = dev;
+	skb_reserve(skb, 2);
+	*sk_buff = skb;
+
+	mapping = pci_map_single(pdev, skb->tail, RX_BUF_SIZE,
+				 PCI_DMA_FROMDEVICE);
+
+	desc->PSize = 0x0;
+	desc->buf_Len |= cpu_to_le32(RX_BUF_SIZE);
+
+	sis190_give_to_asic(desc, mapping);
+
+out:
+	return ret;
+
+err_out:
+	ret = -ENOMEM;
+	sis190_make_unusable_by_asic(desc);
+	goto out;
+}
+
+static void sis190_rx_clear(struct sis190_private *tp)
 {
-	struct sis190_private *tp = dev->priv;
 	int i;
 
-	tp->cur_rx = 0;
-	tp->cur_tx = 0;
-	tp->dirty_tx = 0;
+	for (i = 0; i < NUM_RX_DESC; i++) {
+		if (tp->Rx_skbuff[i]) {
+			sis190_free_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
+					   tp->RxDescArray + i);
+		}
+	}
+}
+
+static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
+			  u32 start, u32 end)
+{
+	u32 cur;
+
+	for (cur = start; end - cur > 0; cur++) {
+		int ret, i = cur % NUM_RX_DESC;
+
+		if (tp->Rx_skbuff[i])
+			continue;
+
+		ret = sis190_alloc_rx_skb(tp->pci_dev, dev, tp->Rx_skbuff + i,
+					  tp->RxDescArray + i);
+		if (ret < 0)
+			break;
+	}
+	return cur - start;
+}
+
+static int SiS190_init_ring(struct net_device *dev)
+{
+	struct sis190_private *tp = dev->priv;
+
+	tp->cur_rx = tp->dirty_rx = 0;
+	tp->cur_tx = tp->dirty_tx = 0;
 	memset(tp->TxDescArray, 0x0, NUM_TX_DESC * sizeof (struct TxDesc));
 	memset(tp->RxDescArray, 0x0, NUM_RX_DESC * sizeof (struct RxDesc));
 
-	for (i = 0; i < NUM_TX_DESC; i++) {
-		tp->Tx_skbuff[i] = NULL;
-	}
-	for (i = 0; i < NUM_RX_DESC; i++) {
-		struct RxDesc *desc = tp->RxDescArray + i;
+	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
+	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
 
-		desc->PSize = 0x0;
+	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
+		goto err_out;
 
-		if (i == (NUM_RX_DESC - 1))
-			desc->buf_Len = BIT_31 + RX_BUF_SIZE;	//bit 31 is End bit
-		else
-			desc->buf_Len = RX_BUF_SIZE;
+	sis190_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
 
-		tp->RxBufferRing[i] = tp->RxBufferRings + i * RX_BUF_SIZE;
-		desc->buf_addr = pci_map_single(tp->pci_dev,
-			tp->RxBufferRing[i], RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
-		desc->status = OWNbit | INTbit;
-	}
+	return 0;
 
+err_out:
+	sis190_rx_clear(tp);
+	return -ENOMEM;
 }
 
 static void
@@ -990,70 +1083,86 @@
 	}
 }
 
+static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
+				     struct RxDesc *desc,
+				     struct net_device *dev)
+{
+	int ret = -1;
+
+	if (pkt_size < rx_copybreak) {
+		struct sk_buff *skb;
+
+		skb = dev_alloc_skb(pkt_size + 2);
+		if (skb) {
+			skb->dev = dev;
+			skb_reserve(skb, 2);
+			eth_copy_and_sum(skb, sk_buff[0]->tail, pkt_size, 0);
+			*sk_buff = skb;
+			sis190_return_to_asic(desc);
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
 static void
 SiS190_rx_interrupt(struct net_device *dev, struct sis190_private *tp,
 		    void *ioaddr)
 {
-	int cur_rx = tp->cur_rx;
-	struct RxDesc *desc = tp->RxDescArray + cur_rx;
+	unsigned long cur_rx, rx_left;
+	int delta;
 
 	assert(dev != NULL);
 	assert(tp != NULL);
 	assert(ioaddr != NULL);
 
-	while ((desc->status & OWNbit) == 0) {
+	cur_rx = tp->cur_rx;
+	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
+
+	while (rx_left > 0) {
+		int entry = cur_rx % NUM_RX_DESC;
+		struct RxDesc *desc = tp->RxDescArray + entry;
+		u32 status = le32_to_cpu(desc->status);
+		
+		if (status & OWNbit)
+			break;
 
-		if (desc->PSize & 0x0080000) {
+		if (cpu_to_le32(desc->PSize) & RxCRC) {
 			printk(KERN_INFO "%s: Rx ERROR!!!\n", dev->name);
 			tp->stats.rx_errors++;
 			tp->stats.rx_length_errors++;
-		} else if (!(desc->PSize & 0x0010000)) {
+		} else if (!(cpu_to_le32(desc->PSize) & 0x0010000)) {
 			printk(KERN_INFO "%s: Rx ERROR!!!\n", dev->name);
 			tp->stats.rx_errors++;
 			tp->stats.rx_crc_errors++;
 		} else {
-			struct sk_buff *skb;
-			int pkt_size;
-
-			pkt_size = (int) (desc->PSize & 0x0000FFFF) - 4;
-			skb = dev_alloc_skb(pkt_size + 2);
-			if (skb != NULL) {
-				skb->dev = dev;
-				skb_reserve(skb, 2);	// 16 byte align the IP fields. //
-				pci_dma_sync_single_for_cpu(tp->pci_dev,
-							    desc->buf_addr,
-							    RX_BUF_SIZE,
-							    PCI_DMA_FROMDEVICE);
-				eth_copy_and_sum(skb, tp->RxBufferRing[cur_rx],
-						 pkt_size, 0);
-				pci_dma_sync_single_for_device(tp->pci_dev,
-							    desc->buf_addr,
-							    RX_BUF_SIZE,
-							    PCI_DMA_FROMDEVICE);
-				skb_put(skb, pkt_size);
-				skb->protocol = eth_type_trans(skb, dev);
-				netif_rx(skb);
-
-				desc->PSize = 0x0;
-
-				if (cur_rx == (NUM_RX_DESC - 1))
-					desc->buf_Len = ENDbit + RX_BUF_SIZE;
-				else
-					desc->buf_Len = RX_BUF_SIZE;
-
-				dev->last_rx = jiffies;
-				tp->stats.rx_bytes += pkt_size;
-				tp->stats.rx_packets++;
-
-				desc->status = OWNbit | INTbit;
-			} else {
-				printk(KERN_WARNING
-				       "%s: Memory squeeze, deferring packet.\n",
-				       dev->name);
-				/* We should check that some rx space is free.
-				   If not, free one and mark stats->rx_dropped++. */
-				tp->stats.rx_dropped++;
-			}
+			struct sk_buff *skb = tp->Rx_skbuff[entry];
+			void (*dma_op)(struct pci_dev *, dma_addr_t, size_t,
+				       int);
+			int pkt_size; 
+
+			pkt_size = (cpu_to_le32(desc->PSize) & 0x0000FFFF) - 4;
+
+			dma_op = pci_dma_sync_single_for_cpu;
+			dma_op(tp->pci_dev, le32_to_cpu(desc->buf_addr),
+			       RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+
+			if (sis190_try_rx_copy(&skb, pkt_size, desc, dev) < 0) {
+				tp->Rx_skbuff[entry] = NULL;
+				dma_op = pci_unmap_single;
+			} else
+				dma_op = pci_dma_sync_single_for_device;
+
+			dma_op(tp->pci_dev, le32_to_cpu(desc->buf_addr),
+			       RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+
+			skb_put(skb, pkt_size);
+			skb->protocol = eth_type_trans(skb, dev);
+			netif_rx(skb);
+
+			dev->last_rx = jiffies;
+			tp->stats.rx_bytes += pkt_size;
+			tp->stats.rx_packets++;
 		}
 
 		cur_rx = (cur_rx + 1) % NUM_RX_DESC;
@@ -1061,6 +1170,15 @@
 	}
 
 	tp->cur_rx = cur_rx;
+
+	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
+	if (delta > 0)
+		tp->dirty_rx += delta;
+	else if (delta < 0)
+		printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
+
+	if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
+		printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
 }
 
 /* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
@@ -1113,7 +1231,6 @@
 {
 	struct sis190_private *tp = dev->priv;
 	void *ioaddr = tp->mmio_addr;
-	int i;
 
 	netif_stop_queue(dev);
 
@@ -1136,18 +1253,13 @@
 	free_irq(dev->irq, dev);
 
 	SiS190_tx_clear(tp);
+	sis190_rx_clear(tp);
 	pci_free_consistent(tp->pci_dev, TX_DESC_TOTAL_SIZE, tp->TxDescArray,
 		tp->tx_dma);
 	pci_free_consistent(tp->pci_dev, RX_DESC_TOTAL_SIZE, tp->RxDescArray,
 		tp->rx_dma);
 	tp->TxDescArray = NULL;
-	for (i = 0; i < NUM_RX_DESC; i++) {
-		pci_unmap_single(tp->pci_dev, tp->RxDescArray[i].buf_addr,
-			RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
-		tp->RxBufferRing[i] = NULL;
-	}
 	tp->RxDescArray = NULL;
-	kfree(tp->RxBufferRings);
 
 	return 0;
 }
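#
# Editor's note: after this patch, SiS190_open() and rtl8169_open() share the
# same shape -- coherent descriptor rings from pci_alloc_consistent() (which
# already exceeds the 256-byte alignment the hardware wants), per-slot skbs
# filled by *_rx_fill(), and goto-based unwinding on failure. A minimal sketch
# of that pattern follows; the bar_* names, the ring size and the stub
# handlers are placeholders, not code from either driver.
#
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>

#define BAR_RX_RING_BYTES	(64 * 16)	/* 64 descriptors of 16 bytes, example only */

struct bar_private {
	struct pci_dev *pci_dev;
	void *RxDescArray;
	dma_addr_t rx_dma;
};

static irqreturn_t bar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;		/* stub */
}

static int bar_init_ring(struct net_device *dev)
{
	return 0;			/* stub: allocate and map per-slot skbs */
}

static int bar_open(struct net_device *dev)
{
	struct bar_private *tp = dev->priv;
	int rc;

	rc = request_irq(dev->irq, bar_interrupt, SA_SHIRQ, dev->name, dev);
	if (rc < 0)
		goto out;

	rc = -ENOMEM;
	tp->RxDescArray = pci_alloc_consistent(tp->pci_dev, BAR_RX_RING_BYTES,
					       &tp->rx_dma);
	if (!tp->RxDescArray)
		goto err_free_irq;

	rc = bar_init_ring(dev);
	if (rc < 0)
		goto err_free_ring;

	/* hardware start would go here */
out:
	return rc;

err_free_ring:
	pci_free_consistent(tp->pci_dev, BAR_RX_RING_BYTES, tp->RxDescArray,
			    tp->rx_dma);
err_free_irq:
	free_irq(dev->irq, dev);
	goto out;
}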
diff -Nru a/drivers/net/wan/lmc/lmc_ver.h b/drivers/net/wan/lmc/lmc_ver.h
--- a/drivers/net/wan/lmc/lmc_ver.h	Tue Mar 30 20:12:21 2004
+++ /dev/null	Wed Dec 31 16:00:00 1969
@@ -1,123 +0,0 @@
-#include <linux/version.h>
-
-#ifndef _IF_LMC_LINUXVER_
-#define _IF_LMC_LINUXVER_
-
- /*
-  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
-  * All rights reserved.  www.lanmedia.com
-  *
-  * This code is written by:
-  * Andrew Stanley-Jones (asj@cban.com)
-  * Rob Braun (bbraun@vix.com),
-  * Michael Graff (explorer@vix.com) and
-  * Matt Thomas (matt@3am-software.com).
-  *
-  * This software may be used and distributed according to the terms
-  * of the GNU General Public License version 2, incorporated herein by reference.
-  */
-
- /*
-  * This file defines and controls all linux version
-  * differences.
-  *
-  * This is being done to keep 1 central location where all linux
-  * version differences can be kept and maintained.  as this code was
-  * found version issues where pepered throughout the source code and
-  * made the souce code not only hard to read but version problems hard
-  * to track down.  If I'm overiding a function/etc with something in
-  * this file it will be prefixed by "LMC_" which will mean look
-  * here for the version dependent change that's been done.
-  *
-  */
-
-#if LINUX_VERSION_CODE < 0x20363
-#define net_device device
-#endif
-
-#if LINUX_VERSION_CODE < 0x20363
-#define LMC_XMITTER_BUSY(x) (x)->tbusy = 1
-#define LMC_XMITTER_FREE(x) (x)->tbusy = 0
-#define LMC_XMITTER_INIT(x) (x)->tbusy = 0
-#else
-#define LMC_XMITTER_BUSY(x) netif_stop_queue(x)
-#define LMC_XMITTER_FREE(x) netif_wake_queue(x)
-#define LMC_XMITTER_INIT(x) netif_start_queue(x)
-
-#endif
-
-
-#if LINUX_VERSION_CODE < 0x20100
-//typedef unsigned int u_int32_t;
-
-#define  LMC_SETUP_20_DEV {\
-                             int indx; \
-                             for (indx = 0; indx < DEV_NUMBUFFS; indx++) \
-                                skb_queue_head_init (&dev->buffs[indx]); \
-                          } \
-                          dev->family = AF_INET; \
-                          dev->pa_addr = 0; \
-                          dev->pa_brdaddr = 0; \
-                          dev->pa_mask = 0xFCFFFFFF; \
-                          dev->pa_alen = 4;		/* IP addr.  sizeof(u32) */
-
-#else
-
-#define LMC_SETUP_20_DEV
-
-#endif
-
-
-#if LINUX_VERSION_CODE < 0x20155 /* basically 2.2 plus */
-
-#define LMC_DEV_KFREE_SKB(skb) dev_kfree_skb((skb), FREE_WRITE)
-
-#else /* Mostly 2.0 kernels */
-
-#define LMC_DEV_KFREE_SKB(skb) dev_kfree_skb(skb)
-
-#endif
-
-#if LINUX_VERSION_CODE < 0x20200
-#else
-
-#endif
-
-#if LINUX_VERSION_CODE < 0x20100
-#define LMC_SKB_FREE(skb, val) (skb->free = val)
-#else
-#define LMC_SKB_FREE(skb, val)
-#endif
-
-
-#if (LINUX_VERSION_CODE >= 0x20200)
-
-#define LMC_SPIN_FLAGS                unsigned long flags;
-#define LMC_SPIN_LOCK_INIT(x)         spin_lock_init(&(x)->lmc_lock);
-#define LMC_SPIN_UNLOCK(x)            ((x)->lmc_lock = SPIN_LOCK_UNLOCKED)
-#define LMC_SPIN_LOCK_IRQSAVE(x)      spin_lock_irqsave (&(x)->lmc_lock, flags);
-#define LMC_SPIN_UNLOCK_IRQRESTORE(x) spin_unlock_irqrestore (&(x)->lmc_lock, flags);
-#else
-#define LMC_SPIN_FLAGS
-#define LMC_SPIN_LOCK_INIT(x)
-#define LMC_SPIN_UNLOCK(x)
-#define LMC_SPIN_LOCK_IRQSAVE(x)
-#define LMC_SPIN_UNLOCK_IRQRESTORE(x)
-#endif
-
-
-#if LINUX_VERSION_CODE >= 0x20100
-#define LMC_COPY_FROM_USER(x, y, z) if(copy_from_user ((x), (y), (z))) return -EFAULT
-#define LMC_COPY_TO_USER(x, y, z) if(copy_to_user ((x), (y), (z))) return -EFAULT
-#else
-#define LMC_COPY_FROM_USER(x, y, z) if(verify_area(VERIFY_READ, (y), (z))) \
-			               return -EFAULT; \
-                                    memcpy_fromfs ((x), (y), (z))
-
-#define LMC_COPY_TO_USER(x, y, z)   if(verify_area(VERIFY_WRITE, (x), (z))) \
-	                               return -EFAULT; \
-                                    memcpy_tofs ((x), (y), (z))
-#endif
-
-
-#endif