patch-2.4.15 linux/drivers/net/acenic.c
- Lines: 325
- Date: Mon Nov 19 15:19:42 2001
- Orig file: v2.4.14/linux/drivers/net/acenic.c
- Orig date: Mon Nov 5 15:55:30 2001
diff -u --recursive --new-file v2.4.14/linux/drivers/net/acenic.c linux/drivers/net/acenic.c
@@ -208,8 +208,32 @@
(((u64)(mask) & 0xffffffff00000000) == 0 ? 0 : -EIO)
#define pci_dma_supported(dev, mask) \
(((u64)(mask) & 0xffffffff00000000) == 0 ? 1 : 0)
+
+#elif (LINUX_VERSION_CODE < 0x02040d)
+
+/*
+ * 2.4.13 introduced pci_map_page()/pci_unmap_page() - for 2.4.12 and prior,
+ * fall back on pci_map_single()/pci_unmap_single().
+ *
+ * We are guaranteed that the page is mapped at this point since
+ * pci_map_page() is only used upon valid struct skb's.
+ */
+static inline dma_addr_t
+pci_map_page(struct pci_dev *cookie, struct page *page, unsigned long off,
+ size_t size, int dir)
+{
+ void *page_virt;
+
+ page_virt = page_address(page);
+ if (!page_virt)
+ BUG();
+ return pci_map_single(cookie, (page_virt + off), size, dir);
+}
+#define pci_unmap_page(cookie, dma_addr, size, dir) \
+ pci_unmap_single(cookie, dma_addr, size, dir)
#endif
+
#if (LINUX_VERSION_CODE < 0x02032b)
/*
* SoftNet
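The shim above means call sites can use the 2.4.13 pci_map_page() API
unconditionally. As a minimal sketch (helper name hypothetical), this is the
pattern the RX refill paths below rely on, assuming skb->data points into a
mapped page as the comment requires:

#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

/* Hypothetical helper: map an skb's data area for DMA.  On 2.4.13+
 * this hits the real pci_map_page(); on 2.4.12 and earlier it goes
 * through the pci_map_single() fallback defined above. */
static inline dma_addr_t map_skb_data(struct pci_dev *pdev,
				      struct sk_buff *skb, size_t len)
{
	return pci_map_page(pdev, virt_to_page(skb->data),
			    (unsigned long) skb->data & ~PAGE_MASK,
			    len, PCI_DMA_FROMDEVICE);
}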
@@ -525,7 +549,7 @@
static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
static char version[] __initdata =
- "acenic.c: v0.83 09/30/2001 Jes Sorensen, linux-acenic@SunSITE.dk\n"
+ "acenic.c: v0.85 11/08/2001 Jes Sorensen, linux-acenic@SunSITE.dk\n"
" http://home.cern.ch/~jes/gige/acenic.html\n";
static struct net_device *root_dev;
@@ -538,7 +562,6 @@
#ifdef NEW_NETINIT
struct net_device *dev;
#endif
-
struct ace_private *ap;
struct pci_dev *pdev = NULL;
int boards_found = 0;
@@ -738,6 +761,7 @@
kfree(dev);
continue;
}
+
if (ap->pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
@@ -767,12 +791,14 @@
MODULE_PARM(max_tx_desc, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(rx_coal_tick, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(max_rx_desc, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM_DESC(link, "Acenic/3C985/NetGear link state");
-MODULE_PARM_DESC(trace, "Acenic/3C985/NetGear firmware trace level");
+MODULE_PARM(tx_ratio, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
+MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives");
MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait");
MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first rx descriptor arrives");
MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait");
+MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
#endif
@@ -911,8 +937,7 @@
RX_JUMBO_RING_ENTRIES +
RX_MINI_RING_ENTRIES +
RX_RETURN_RING_ENTRIES));
- pci_free_consistent(ap->pdev, size,
- ap->rx_std_ring,
+ pci_free_consistent(ap->pdev, size, ap->rx_std_ring,
ap->rx_ring_base_dma);
ap->rx_std_ring = NULL;
ap->rx_jumbo_ring = NULL;
@@ -921,8 +946,7 @@
}
if (ap->evt_ring != NULL) {
size = (sizeof(struct event) * EVT_RING_ENTRIES);
- pci_free_consistent(ap->pdev, size,
- ap->evt_ring,
+ pci_free_consistent(ap->pdev, size, ap->evt_ring,
ap->evt_ring_dma);
ap->evt_ring = NULL;
}
@@ -933,7 +957,8 @@
}
if (ap->rx_ret_prd != NULL) {
pci_free_consistent(ap->pdev, sizeof(u32),
- (void *)ap->rx_ret_prd, ap->rx_ret_prd_dma);
+ (void *)ap->rx_ret_prd,
+ ap->rx_ret_prd_dma);
ap->rx_ret_prd = NULL;
}
if (ap->tx_csm != NULL) {
@@ -1051,8 +1076,8 @@
struct ace_private *ap;
struct ace_regs *regs;
struct ace_info *info = NULL;
- u64 tmp_ptr;
unsigned long myjif;
+ u64 tmp_ptr;
u32 tig_ver, mac1, mac2, tmp, pci_state;
int board_idx, ecode = 0;
short i;
@@ -1306,9 +1331,9 @@
/*
* Configure DMA attributes.
*/
- if (!pci_set_dma_mask(ap->pdev, (u64) 0xffffffffffffffff)) {
+ if (!pci_set_dma_mask(ap->pdev, 0xffffffffffffffffULL)) {
ap->pci_using_dac = 1;
- } else if (!pci_set_dma_mask(ap->pdev, (u64) 0xffffffff)) {
+ } else if (!pci_set_dma_mask(ap->pdev, 0xffffffffULL)) {
ap->pci_using_dac = 0;
} else {
ecode = -ENODEV;
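The mask negotiation above follows the standard probe-time pattern: request a
full 64-bit DMA mask first, fall back to a 32-bit one, and record which
succeeded so that NETIF_F_HIGHDMA is only advertised when the device can
address highmem. A minimal sketch of the same logic (helper name
hypothetical, error paths elided):

#include <linux/pci.h>

/* Sketch: prefer 64-bit addressing (DAC cycles), fall back to 32-bit,
 * otherwise refuse the device.  pci_set_dma_mask() returns 0 on
 * success in the 2.4 API. */
static int ace_configure_dma(struct pci_dev *pdev, int *using_dac)
{
	if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL))
		*using_dac = 1;
	else if (!pci_set_dma_mask(pdev, 0xffffffffULL))
		*using_dac = 0;
	else
		return -ENODEV;
	return 0;
}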
@@ -1362,7 +1387,7 @@
ace_load_firmware(dev);
ap->fw_running = 0;
- tmp_ptr = (u64) ap->info_dma;
+ tmp_ptr = ap->info_dma;
writel(tmp_ptr >> 32, &regs->InfoPtrHi);
writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);
@@ -1428,7 +1453,8 @@
(RX_STD_RING_ENTRIES +
RX_JUMBO_RING_ENTRIES))));
info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
- info->rx_mini_ctrl.flags = RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR;
+ info->rx_mini_ctrl.flags =
+ RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR;
for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
ap->rx_mini_ring[i].flags =
@@ -1712,11 +1738,13 @@
dev->name, (unsigned int)readl(&regs->HostCtrl));
/* This can happen due to ieee flow control. */
} else {
- printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n", dev->name);
+ printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
+ dev->name);
netif_wake_queue(dev);
}
}
+
static void ace_tasklet(unsigned long dev)
{
struct ace_private *ap = ((struct net_device *)dev)->priv;
@@ -1747,7 +1775,7 @@
if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
!test_and_set_bit(0, &ap->jumbo_refill_busy)) {
#if DEBUG
- printk("refilling jumbo buffers (current %i)\n", >cur_size);
+ printk("refilling jumbo buffers (current %i)\n", cur_size);
#endif
ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
}
@@ -1799,10 +1827,8 @@
* Make sure IP header starts on a fresh cache line.
*/
skb_reserve(skb, 2 + 16);
- mapping = pci_map_page(ap->pdev,
- virt_to_page(skb->data),
- ((unsigned long) skb->data &
- ~PAGE_MASK),
+ mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+ ((unsigned long)skb->data & ~PAGE_MASK),
ACE_STD_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->skb->rx_std_skbuff[idx].skb = skb;
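The skb_reserve(skb, 2 + 16) above is what the "fresh cache line" comment in
this path refers to: with 18 bytes reserved, the 14-byte Ethernet header ends
at offset 32, so the IP header begins exactly on a 32-byte cache-line
boundary. The page/offset split then lets pci_map_page() cover only the
usable part of the buffer. A condensed sketch of the idiom shared by the
std, mini, and jumbo refill paths (constants as in the driver):

	/* Sketch: 2 + 16 + 14 (Ethernet header) == 32, so the IP header
	 * starts cache-line aligned; the low bits of skb->data give the
	 * in-page offset for the mapping. */
	skb_reserve(skb, 2 + 16);
	mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
			       (unsigned long) skb->data & ~PAGE_MASK,
			       ACE_STD_BUFSIZE - (2 + 16),
			       PCI_DMA_FROMDEVICE);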
@@ -1866,10 +1892,8 @@
* Make sure the IP header ends up on a fresh cache line
*/
skb_reserve(skb, 2 + 16);
- mapping = pci_map_page(ap->pdev,
- virt_to_page(skb->data),
- ((unsigned long) skb->data &
- ~PAGE_MASK),
+ mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+ ((unsigned long)skb->data & ~PAGE_MASK),
ACE_MINI_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->skb->rx_mini_skbuff[idx].skb = skb;
@@ -1928,10 +1952,8 @@
* Make sure the IP header ends up on a fresh cache line
*/
skb_reserve(skb, 2 + 16);
- mapping = pci_map_page(ap->pdev,
- virt_to_page(skb->data),
- ((unsigned long) skb->data &
- ~PAGE_MASK),
+ mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+ ((unsigned long)skb->data & ~PAGE_MASK),
ACE_JUMBO_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->skb->rx_jumbo_skbuff[idx].skb = skb;
@@ -2499,7 +2521,7 @@
mapping = info->mapping;
if (mapping) {
- memset(ap->tx_ring+i, 0, sizeof(struct tx_desc));
+ memset(ap->tx_ring + i, 0, sizeof(struct tx_desc));
pci_unmap_page(ap->pdev, mapping, info->maplen,
PCI_DMA_TODEVICE);
info->mapping = 0;
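The cleanup above shows why the TX ring bookkeeping stores maplen at all:
pci_unmap_page() must be called with the same length and direction as the
original pci_map_page(), and by teardown time the skb that knew its own
length may already be gone. A hedged sketch of the pairing, using the
driver's own field names:

	/* Sketch: release one TX slot.  The descriptor is cleared first
	 * so the NIC cannot reuse a stale mapping; length and direction
	 * must match the original map exactly, and mapping == 0 serves
	 * as the "unmapped" sentinel. */
	if (info->mapping) {
		memset(ap->tx_ring + i, 0, sizeof(struct tx_desc));
		pci_unmap_page(ap->pdev, info->mapping, info->maplen,
			       PCI_DMA_TODEVICE);
		info->mapping = 0;
	}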
@@ -2523,24 +2545,23 @@
return 0;
}
+
static inline dma_addr_t
ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
struct sk_buff *tail, u32 idx)
{
- unsigned long addr;
+ dma_addr_t mapping;
struct tx_ring_info *info;
- addr = pci_map_page(ap->pdev,
- virt_to_page(skb->data),
- ((unsigned long) skb->data &
- ~PAGE_MASK),
- skb->len, PCI_DMA_TODEVICE);
+ mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+ ((unsigned long) skb->data & ~PAGE_MASK),
+ skb->len, PCI_DMA_TODEVICE);
info = ap->skb->tx_skbuff + idx;
info->skb = tail;
- info->mapping = addr;
+ info->mapping = mapping;
info->maplen = skb->len;
- return addr;
+ return mapping;
}
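The switch from unsigned long to dma_addr_t in this hunk and the ones below
is more than cosmetic: with the 64-bit mask accepted earlier, a bus address
can exceed 32 bits even on a 32-bit host, where unsigned long would silently
truncate it. A small sketch of why the full width matters once a descriptor
wants hi/lo halves, as with InfoPtrHi/InfoPtrLo above (helper name
hypothetical):

#include <linux/types.h>

/* Sketch: split a bus address into the hi/lo pair a descriptor wants.
 * The cast to u64 keeps the shift well-defined even when dma_addr_t
 * is only 32 bits wide on the host. */
static inline void split_dma_addr(dma_addr_t mapping, u32 *hi, u32 *lo)
{
	*hi = (u64) mapping >> 32;
	*lo = (u64) mapping & 0xffffffff;
}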
@@ -2581,9 +2602,9 @@
if (!skb_shinfo(skb)->nr_frags)
#endif
{
- unsigned long addr;
+ dma_addr_t mapping;
- addr = ace_map_tx_skb(ap, skb, skb, idx);
+ mapping = ace_map_tx_skb(ap, skb, skb, idx);
flagsize = (skb->len << 16) | (BD_FLG_END);
if (skb->ip_summed == CHECKSUM_HW)
flagsize |= BD_FLG_TCP_UDP_SUM;
@@ -2594,42 +2615,40 @@
if (tx_ring_full(ap->tx_ret_csm, idx))
flagsize |= BD_FLG_COAL_NOW;
- ace_load_tx_bd(desc, addr, flagsize);
+ ace_load_tx_bd(desc, mapping, flagsize);
}
#if MAX_SKB_FRAGS
else {
- unsigned long addr;
+ dma_addr_t mapping;
int i, len = 0;
- addr = ace_map_tx_skb(ap, skb, NULL, idx);
+ mapping = ace_map_tx_skb(ap, skb, NULL, idx);
flagsize = ((skb->len - skb->data_len) << 16);
if (skb->ip_summed == CHECKSUM_HW)
flagsize |= BD_FLG_TCP_UDP_SUM;
- ace_load_tx_bd(ap->tx_ring + idx, addr, flagsize);
+ ace_load_tx_bd(ap->tx_ring + idx, mapping, flagsize);
idx = (idx + 1) % TX_RING_ENTRIES;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct tx_ring_info *info;
- dma_addr_t phys;
len += frag->size;
info = ap->skb->tx_skbuff + idx;
desc = ap->tx_ring + idx;
- phys = pci_map_page(ap->pdev, frag->page,
- frag->page_offset,
- frag->size,
- PCI_DMA_TODEVICE);
+ mapping = pci_map_page(ap->pdev, frag->page,
+ frag->page_offset, frag->size,
+ PCI_DMA_TODEVICE);
flagsize = (frag->size << 16);
if (skb->ip_summed == CHECKSUM_HW)
flagsize |= BD_FLG_TCP_UDP_SUM;
idx = (idx + 1) % TX_RING_ENTRIES;
- if (i == skb_shinfo(skb)->nr_frags-1) {
+ if (i == skb_shinfo(skb)->nr_frags - 1) {
flagsize |= BD_FLG_END;
if (tx_ring_full(ap->tx_ret_csm, idx))
flagsize |= BD_FLG_COAL_NOW;
@@ -2642,9 +2661,9 @@
} else {
info->skb = NULL;
}
- info->mapping = phys;
+ info->mapping = mapping;
info->maplen = frag->size;
- ace_load_tx_bd(desc, phys, flagsize);
+ ace_load_tx_bd(desc, mapping, flagsize);
}
}
#endif
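For fragmented skbs the loop above maps each fragment's page directly: no
virt_to_page() is needed because a 2.4 skb_frag_t already carries the struct
page, offset, and size, and since the page may live in highmem with no
permanent kernel virtual address, pci_map_page() is the only correct API for
it. A condensed sketch of the per-fragment work (helper name hypothetical):

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Sketch: map one TX fragment for DMA.  Works for highmem pages,
 * which is precisely why the zero-copy TX path needs pci_map_page()
 * rather than a virtual-address based mapping call. */
static inline dma_addr_t map_tx_frag(struct pci_dev *pdev,
				     skb_frag_t *frag)
{
	return pci_map_page(pdev, frag->page, frag->page_offset,
			    frag->size, PCI_DMA_TODEVICE);
}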