/* s3c4510b.c
 * linux/drivers/net/s3c4510.c
 * Ethernet driver for the Samsung S3C4510B
 * Copyright (C) 2002 Mac Wang
 *
 * Version 0.2 (Liu Tao):
 * - Zero-copy transmit and receive.
 * - Code clean up.
 *
 * Version 0.2.01:
 * - Modified by Liu Yanguo, Aug 20th 2009.
 */
#include <linux/config.h> #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/skbuff.h> #include <linux/sockios.h> //do_ioctl --int cmd -- #include <linux/if.h> //do_ioctl --struct ifreq #include <asm/irq.h> #include <asm/hardware.h> #include "s3c4510.h"
/* Driver identification strings, printed once at probe time. */
#define DRV_NAME "S3C4510 ether driver"
#define DRV_VERSION "v0.2.01 (test10)"
#define DRV_RELDATE "20-Aug-2009"

static char version[] __devinitdata =
	DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";

/* Number of receive buffer descriptors in the BDMA Rx ring. */
#define RX_RING_SIZE 64
/* Number of transmit buffer descriptors in the BDMA Tx ring. */
#define TX_RING_SIZE 32
/* Size of each packet buffer: max Ethernet frame rounded up. */
#define PKT_BUF_SZ 1536

/* Refill the Rx ring (via tasklet) once this many buffers are consumed. */
#define RX_REFILL_CNT 32
/* Tx watchdog timeout handed to the network core. */
#define TX_TIMEOUT (4*HZ)
/* Pre-composed values written to the MAC/BDMA control registers
 * (individual bit definitions come from s3c4510.h). */
enum {
	gMACCON = FullDup,			/* MAC control: full duplex */
	gMACTXCON = EnComp | TxEn,		/* MAC Tx: completion irq, enable */
	gMACRXCON = RxEn | StripCRC,		/* MAC Rx: enable, strip CRC */
	/* BDMA Tx: burst mode, stop-skip-owner handling, enable. */
	gBDMATXCON = BTxBRST | BTxMSL110 | BTxSTSKO | BTxEn,
	/* BDMA Rx: done-irq, little-endian, address increment, burst,
	 * 2-byte word alignment (BRxWA10), no per-frame irq. */
	gBDMARXCON = BRxDIE | BRxEn | BRxLittle | BRxMAINC | BRxBRST | BRxSTSKO | BRxWA10 | BRxNOIE,
	gCAMCON = CompEn | BroadAcc		/* CAM: compare mode, accept broadcast */
};
/* Rx/Tx buffer descriptor layout expected by the S3C4510B BDMA engine.
 * The hardware walks these as a circular linked list via next_ptr. */
struct netdev_desc {
	u32 data_ptr;	/* buffer address; BDMA_OWNER bit OR'ed in when hw owns it */
	u32 misc_ctl;	/* per-frame control bits (padding, CRC mode, alignment, ...) */
	u32 stat_len;	/* status word in the high 16 bits, frame length in the low 16 */
	u32 next_ptr;	/* address of the next descriptor in the ring */
};
/*-----------------------*
 * MII reg_data          *
 *-----------------------*/
/* Register block exchanged with userspace through the MII ioctls;
 * stored inline in the ifreq union (see netdev_do_ioctl). */
struct mii_data{
	u16 phy_reg0;	/* PHY control register (reg 0) */
	u16 phy_reg1;	/* PHY status register (reg 1) */
	u16 phy_id1;	/* PHY identifier, high word (reg 2) */
	u16 phy_id2;	/* PHY identifier, low word (reg 3) */
};
/*-----------------------*
 * MII support functions *
 *-----------------------*/

/* Forward declaration: delay_physet() is defined later in this file.
 * Without it the call below relied on an implicit function declaration,
 * which was removed from the language in C99. */
void delay_physet(void);

/*
 * MiiStationWrite - write one PHY register over the MII management bus.
 * @PhyInAddr: register-address field, pre-shifted for REG_STACON
 * @PhyAddr:   PHY-address field, pre-shifted for REG_STACON
 * @PhyWrData: 16-bit value to write
 *
 * Busy-waits until the station controller clears MiiBusy, then applies a
 * short settle delay.  NOTE(review): the wait loop has no timeout, so a
 * wedged PHY would hang the caller.
 */
void MiiStationWrite(u32 PhyInAddr, u32 PhyAddr, u32 PhyWrData)
{
	CSR_WRITE(REG_STADATA, PhyWrData);
	CSR_WRITE(REG_STACON, PhyInAddr | PhyAddr | MiiBusy | PHYREGWRITE);
	while ((CSR_READ(REG_STACON) & MiiBusy))
		;
	delay_physet();
}

/*
 * MiiStationRead - read one PHY register over the MII management bus.
 * @PhyInAddr: register-address field, pre-shifted for REG_STACON
 * @PhyAddr:   PHY-address field, pre-shifted for REG_STACON
 *
 * Returns the 16-bit register value (widened to u32).  Same untimed
 * busy-wait caveat as MiiStationWrite().
 */
u32 MiiStationRead(u32 PhyInAddr, u32 PhyAddr)
{
	u32 PhyRdData;

	CSR_WRITE(REG_STACON, PhyInAddr | PhyAddr | MiiBusy);
	while ((CSR_READ(REG_STACON) & MiiBusy))
		;
	PhyRdData = CSR_READ(REG_STADATA);
	return PhyRdData;
}
/* Crude settle delay after a PHY register write: roughly 1000 empty
 * loop iterations (timing is not calibrated). */
void delay_physet(void)
{
	int spin;

	for (spin = 1000; spin > 0; spin--)
		;
}
/* Per-device private state, hung off dev->priv. */
struct netdev_private {
	/* Backing storage for the descriptor rings; the CPU and the BDMA
	 * engine access them through the uncached aliases below. */
	struct netdev_desc rx_ring_buf[RX_RING_SIZE];
	struct netdev_desc tx_ring_buf[TX_RING_SIZE];
	/* skb currently attached to each descriptor slot (NULL if empty). */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];

	spinlock_t lock;	/* spinlock (initialised in probe; not taken anywhere visible here) */

	/* Ring bookkeeping: cur_* counts produced entries, dirty_* counts
	 * reclaimed ones; both grow monotonically and are reduced mod ring
	 * size when indexing. */
	unsigned int cur_rx, dirty_rx;
	volatile struct netdev_desc *rx_ring;	/* uncached view of rx_ring_buf */
	unsigned int cur_tx, dirty_tx;
	volatile struct netdev_desc *tx_ring;	/* uncached view of tx_ring_buf */

	struct tasklet_struct rx_tasklet;	/* deferred Rx refill (refill_rx) */
	struct net_device_stats stats;		/* counters reported via get_stats */
} __attribute__((aligned (L1_CACHE_BYTES)));
/* Forward declarations for the driver's internal functions. */
static inline void invalidate_cache(void *addr, u32 len);
static inline void wbflush(void);
static void init_ring(struct net_device *dev);
static void rx_intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void tx_intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void refill_rx(unsigned long data);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static int netdev_open(struct net_device *dev);
static int netdev_close(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
/* Private ioctl entry point for MII register access. */
static int netdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/*
 * invalidate_cache - discard cached lines covering [addr, addr + len).
 * @addr: start of the buffer
 * @len:  buffer length in bytes; must be <= 8K
 *
 * Writing zero to a tag word at TAG_BASE invalidates the corresponding
 * 16-byte cache line; the 256-entry tag index wraps with a mask.
 */
static inline void invalidate_cache(void *addr, u32 len)
{
	u32 *tags = (u32 *)(TAG_BASE);
	u32 idx = ((u32)addr >> 4) & 0xff;
	u32 nlines = (len + 15) >> 4;	/* round up to whole cache lines */

	for (; nlines != 0; nlines--) {
		tags[idx] = 0;
		idx = (idx + 1) & 0xff;
	}
}
/*
 * wbflush - drain the CPU write buffer.
 * Performs a dummy uncached register read; the volatile local keeps the
 * compiler from eliminating the read.
 */
static inline void wbflush(void)
{
	volatile unsigned int dummy;

	dummy = CSR_READ(NOCACHE_BIT);
}
/*
 * init_ring - initialize the Rx/Tx descriptor rings.
 * @dev: the network device being opened
 *
 * Builds both circular descriptor lists, pre-loads all but one Rx slot
 * with fresh skbs owned by the BDMA engine, and programs the hardware
 * ring base pointers.
 */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int i;

	/* Initialize descriptor pointers.  The rings are accessed through an
	 * uncached alias (NOCACHE_BIT) so the CPU and BDMA stay coherent. */
	np->rx_ring = (struct netdev_desc *)((u32)np->rx_ring_buf | NOCACHE_BIT);
	np->tx_ring = (struct netdev_desc *)((u32)np->tx_ring_buf | NOCACHE_BIT);
	np->cur_rx = np->dirty_rx = 0;
	np->cur_tx = np->dirty_tx = 0;

	/* Initialize all Rx descriptors; next_ptr closes the circle. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].data_ptr = 0;
		np->rx_ring[i].misc_ctl = 0;
		np->rx_ring[i].stat_len = 0;
		np->rx_ring[i].next_ptr = (u32)(np->rx_ring + ((i + 1) % RX_RING_SIZE));
		np->rx_skbuff[i] = NULL;
	}

	/* Initialize all Tx descriptors. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].data_ptr = 0;
		np->tx_ring[i].misc_ctl = 0;
		np->tx_ring[i].stat_len = 0;
		np->tx_ring[i].next_ptr = (u32)(np->tx_ring + ((i + 1) % TX_RING_SIZE));
		np->tx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  One slot is deliberately left empty so the
	 * hardware RXPTR can never lap cur_rx.  BDMA_OWNER hands the buffer to
	 * the hardware.  NOTE(review): data_ptr is captured before
	 * skb_reserve(2); this appears to rely on the BRxWA10 2-byte alignment
	 * mode placing the frame at a matching offset -- confirm against the
	 * S3C4510B datasheet. */
	for (i = 0; i < RX_RING_SIZE - 1; i++) {
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
		if (skb == NULL)
			break;
		np->rx_skbuff[i] = skb;
		np->rx_ring[i].data_ptr = (u32)skb->data | BDMA_OWNER;
		skb->dev = dev;
		skb_reserve(skb, 2);
	}
	/* dirty_rx trails cur_rx (= 0) by the number of unfilled slots;
	 * the unsigned wrap-around is intentional difference arithmetic. */
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Write descriptor base addresses to the controller. */
	CSR_WRITE(BDMARXPTR, (u32)(np->rx_ring));
	CSR_WRITE(BDMATXPTR, (u32)(np->tx_ring));
}
/* Interrupt handlers. */

/*
 * rx_intr_handler - BDMA Rx interrupt.
 * Walks the Rx ring from cur_rx up to (but not including) the descriptor
 * the hardware BDMARXPTR currently points at: good frames are handed to
 * the stack, bad ones are only counted (their skb stays in the slot for
 * reuse).  Ring refill is deferred to the refill_rx tasklet.
 */
static void rx_intr_handler(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	volatile struct netdev_desc *desc;
	unsigned int entry, count;
	struct sk_buff *skb;
	int pkt_len;
	u32 stat;

	count = 0;
	entry = np->cur_rx % RX_RING_SIZE;
	desc = np->rx_ring + entry;
	/* Every descriptor before the hardware pointer has been filled. */
	while ((u32)desc != CSR_READ(BDMARXPTR)) {
		stat = (desc->stat_len >> 16) & 0xffff;	/* status half-word */
		if (stat & Good) {
			pkt_len = (desc->stat_len & 0xffff);	/* Chip omits the CRC. */
			skb = np->rx_skbuff[entry];
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, dev);
			/* DMA wrote behind the cache: discard stale lines. */
			invalidate_cache(skb->data, pkt_len);
			netif_rx(skb);
			np->rx_skbuff[entry] = NULL;	/* slot empty until refilled */
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		} else {
			np->stats.rx_errors++;
			if (stat & LongErr) np->stats.rx_length_errors++;
			if (stat & OvMax) np->stats.rx_over_errors++;
			if (stat & CRCErr) np->stats.rx_crc_errors++;
			if (stat & AlignErr) np->stats.rx_frame_errors++;
			if (stat & Overflow) np->stats.rx_fifo_errors++;
		}

		count++;
		entry = (entry + 1) % RX_RING_SIZE;
		desc = np->rx_ring + entry;
	}
	np->cur_rx += count;

	CSR_WRITE(BDMASTAT, S_BRxRDF);	/* acknowledge the Rx status bit */

	/* Refill via tasklet once enough buffers are consumed; otherwise
	 * just re-enable the receiver. */
	if (np->cur_rx - np->dirty_rx > RX_REFILL_CNT)
		tasklet_schedule(&np->rx_tasklet);
	else
		CSR_WRITE(BDMARXCON, gBDMARXCON);
}
/*
 * tx_intr_handler - MAC Tx completion interrupt.
 * Reclaims descriptors between dirty_tx and the hardware BDMATXPTR,
 * frees the transmitted skbs, updates statistics, and wakes the transmit
 * queue if start_tx() had stopped it.
 */
static void tx_intr_handler(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	volatile struct netdev_desc *desc;
	unsigned int entry, count;
	u32 stat;

	count = 0;
	entry = np->dirty_tx % TX_RING_SIZE;
	desc = np->tx_ring + entry;

	/* Everything before the hardware pointer has been transmitted. */
	while ((u32)desc != CSR_READ(BDMATXPTR)) {
		stat = (desc->stat_len >> 16) & 0xffff;	/* status half-word */
		if (stat & Comp) {
			np->stats.tx_packets++;
		} else {
			np->stats.tx_errors++;
			if (stat & TxPar) np->stats.tx_aborted_errors++;
			if (stat & NCarr) np->stats.tx_carrier_errors++;
			if (stat & Under) np->stats.tx_fifo_errors++;
			if (stat & LateColl) np->stats.tx_window_errors++;
			if (stat & ExColl) np->stats.collisions++;
		}
		/* Safe to free from irq context via the _irq variant. */
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
		count++;
		entry = (entry + 1) % TX_RING_SIZE;
		desc = np->tx_ring + entry;
	}
	np->dirty_tx += count;

	/* Ring space was just reclaimed; let the stack transmit again. */
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);

	/* Fix me: re-enable BDMA rx (in case it has stalled). */
	CSR_WRITE(BDMASTAT, S_BRxRDF);
	CSR_WRITE(BDMARXCON, gBDMARXCON);
}
/*
 * refill_rx - tasklet body: replenish consumed Rx ring slots.
 * @data: the net_device pointer, cast to unsigned long by tasklet_init().
 *
 * Re-arms descriptors whose skb survived (error frames keep their buffer)
 * and allocates fresh skbs for the emptied slots.  Stops early if an
 * allocation fails; those slots are retried on the next run.
 */
static void refill_rx(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	struct sk_buff *skb;
	unsigned int entry;
	int i, count;

	/* Sub one to avoid RXPTR running ahead of cur_rx by a whole loop. */
	count = np->cur_rx - np->dirty_rx - 1;
	entry = np->dirty_rx % RX_RING_SIZE;
	for (i = 0; i < count; i++, entry = (entry + 1) % RX_RING_SIZE) {
		/* Slot still holds a buffer (error frame): just hand it back. */
		if (np->rx_skbuff[entry]) {
			np->rx_ring[entry].data_ptr |= BDMA_OWNER;
			continue;
		}

		skb = dev_alloc_skb(PKT_BUF_SZ);
		if (skb == NULL)
			break;	/* Better luck next round. */
		np->rx_skbuff[entry] = skb;
		/* NOTE(review): data_ptr captured before skb_reserve(2), same
		 * BRxWA10 alignment assumption as init_ring() -- confirm. */
		np->rx_ring[entry].data_ptr = (u32)skb->data | BDMA_OWNER;
		skb->dev = dev;
		skb_reserve(skb, 2);
	}
	np->dirty_rx += i;

	/* Flush the descriptor writes, ack status, restart the receiver. */
	wbflush();
	CSR_WRITE(BDMASTAT, S_BRxRDF);
	CSR_WRITE(BDMARXCON, gBDMARXCON);
}
/*
 * start_tx - hard_start_xmit: queue one skb for transmission.
 * @skb: frame to send (freed later by tx_intr_handler)
 * @dev: the network device
 *
 * Loads the next free Tx descriptor and kicks the BDMA/MAC engines.
 * Always returns 0; the queue is stopped when the ring is one short of
 * full so the hardware TXPTR can never lap dirty_tx.
 */
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	volatile struct netdev_desc *desc;
	unsigned int entry, align;

	entry = np->cur_tx % TX_RING_SIZE;
	desc = np->tx_ring + entry;

	/* BDMA reads from a word-aligned address; 'align' records how many
	 * leading bytes to skip (encoded into misc_ctl below). */
	align = (u32)skb->data % sizeof(u32);

	np->tx_skbuff[entry] = skb;
	desc->misc_ctl = (Padding | CRCMode | FrameDataPtrInc | LittleEndian | MACTxIntEn | (align << 5));
	desc->stat_len = (skb->len & 0xffff);
	/* Setting BDMA_OWNER last hands the descriptor to the hardware. */
	desc->data_ptr = ((u32)(skb->data) - align) | BDMA_OWNER;
	wbflush();
	CSR_WRITE(BDMATXCON, gBDMATXCON);
	CSR_WRITE(MACTXCON, gMACTXCON);

	np->cur_tx++;
	/* Sub one to avoid TXPTR running ahead of dirty_tx by a whole loop. */
	if (np->cur_tx - np->dirty_tx >= TX_RING_SIZE - 1)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;	/* feed the Tx watchdog */

	return 0;
}
static int netdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { //struct netdev_private *np = (struct netdev_private *)dev->priv; struct mii_data *data = (struct mii_data *)&ifr->ifr_data; u16 value = MiiStationRead(REGN0, REGADD);
switch(cmd) { case SIOCGMIIREG: //Read the specified of the MII register
data->phy_reg0 = MiiStationRead(REGN0, REGADD); // 0X20
data->phy_reg1 = MiiStationRead(REGN1, REGADD); data->phy_id1 = MiiStationRead(REGN2, REGADD); data->phy_id2 = MiiStationRead(REGN3, REGADD); return 0; case SIOCSMIIREG: if((value & 0xffff) != (data->phy_reg0 & 0xffff)) { value = (data->phy_reg0 & 0xffff); MiiStationWrite(REGN0, REGADD, value); printk(KERN_INFO, "after phy config\n"); } printk(KERN_INFO, "before phy config\n"); return 0; default: return -EOPNOTSUPP; } }
static int netdev_open(struct net_device *dev) { int i; MOD_INC_USE_COUNT;
/* Disable interrupt. */ disable_irq(INT_BDMATX); disable_irq(INT_BDMARX); disable_irq(INT_MACTX); disable_irq(INT_MACRX);
/* Request IRQ. */ if (request_irq(INT_BDMARX, &rx_intr_handler, SA_INTERRUPT, dev->name, dev)) { printk(KERN_ERR "%s: Request irq %d failed.\n", dev->name, INT_BDMARX); return -EAGAIN; } if (request_irq(INT_MACTX, &tx_intr_handler, SA_INTERRUPT, dev->name, dev)) { printk(KERN_ERR "%s: Request irq %d failed.\n", dev->name, INT_MACTX); return -EAGAIN; }
/* Reset BDMA and MAC. */ CSR_WRITE(BDMARXCON, BRxRS); CSR_WRITE(BDMATXCON, BTxRS); CSR_WRITE(MACON, Reset); CSR_WRITE(BDMARXLSZ, PKT_BUF_SZ); CSR_WRITE(MACON, gMACCON);
/* Configure CAM. */ for(i = 0; i < (int)dev->addr_len-2; i++) CAM_Reg(0) = (CAM_Reg(0) << 8) | dev->dev_addr[i]; for(i = (int)dev->addr_len-2; i < (int)dev->addr_len; i++) CAM_Reg(1) = (CAM_Reg(1) << 8) | dev->dev_addr[i]; CAM_Reg(1) = (CAM_Reg(1) << 16); CSR_WRITE(CAMEN, 0x0001); CSR_WRITE(CAMCON, gCAMCON);
/* Init Rx/Tx descriptors. */ init_ring(dev);
/* Enable interrupt. */ enable_irq(INT_BDMARX); enable_irq(INT_MACTX);
/* Enable BDMA and MAC. */ wbflush(); CSR_WRITE(BDMATXCON, gBDMATXCON); CSR_WRITE(MACTXCON, gMACTXCON); CSR_WRITE(BDMARXCON, gBDMARXCON); CSR_WRITE(MACRXCON, gMACRXCON); /* Start the transmit queue. */ netif_start_queue(dev); return 0; }
static int netdev_close(struct net_device *dev) { struct netdev_private *np = dev->priv; struct sk_buff *skb; int i;
/* Stop the transmit queue. */ netif_stop_queue(dev); /* Stop BDMA and MAC. */ CSR_WRITE(BDMATXCON, 0); CSR_WRITE(BDMARXCON, 0); CSR_WRITE(MACTXCON, 0); CSR_WRITE(MACRXCON, 0);
/* Free irqs. */ free_irq(INT_BDMARX, dev); free_irq(INT_MACTX, dev);
/* Wait and kill tasklet. */ tasklet_kill(&np->rx_tasklet);
/* Free all skbuffs */ for (i = 0; i < RX_RING_SIZE; i++) { skb = np->rx_skbuff[i]; if (skb) dev_kfree_skb(skb); } for (i = 0; i < TX_RING_SIZE; i++) { skb = np->tx_skbuff[i]; if (skb) dev_kfree_skb(skb); }
MOD_DEC_USE_COUNT; return 0; }
/* Return the interface statistics accumulated in the private state. */
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *priv = (struct netdev_private *)dev->priv;

	return &priv->stats;
}
static int s3c4510_probe(struct net_device *dev) { char address[12]; char *p; int i; struct netdev_private *np; SET_MODULE_OWNER(dev); /* Assign some of the fields. */ ether_setup(dev);
/* Set net_device methods. */ dev->open = netdev_open; dev->stop = netdev_close; dev->get_stats = get_stats; dev->hard_start_xmit = start_tx; //add ioctl function
dev->do_ioctl = netdev_do_ioctl;
/* Set net_device data members. */ dev->watchdog_timeo = TX_TIMEOUT; //100
dev->irq = INT_BDMARX; //17
dev->dma = 0;
/* Set MAC address. */ p = (volatile char *)(0x05000000 + 0x170000); for(i = 0; i < 12; i++){ address[i] = *(p + i); } dev->dev_addr[0] = 0x00; dev->dev_addr[1] = address[10]; dev->dev_addr[2] = address[9]; dev->dev_addr[3] = address[8]; dev->dev_addr[4] = address[1]; dev->dev_addr[5] = address[0];
/* Set private data structure. */ dev->priv = kmalloc(sizeof(*np), GFP_KERNEL); if(dev->priv == NULL) return -ENOMEM; np = dev->priv; memset(np, 0, sizeof(*np)); spin_lock_init(&np->lock); tasklet_init(&np->rx_tasklet, refill_rx, (unsigned long)dev);
/* Print banner. */ printk(KERN_INFO "%s", version);
return 0; }
/* The single, statically allocated device; probed through the old-style
 * init hook (GNU label-initializer syntax, pre-C99 kernels). */
static struct net_device s3c4510_device = {
	init: s3c4510_probe,
};
/* Module entry point: register the single static device with the
 * network core (its init hook runs s3c4510_probe). */
static int __init s3c4510_init(void)
{
	int rc;

	rc = register_netdev(&s3c4510_device);
	return rc;
}
/*
 * s3c4510_exit - module unload.
 * Unregister the device BEFORE freeing its private data: the original
 * freed dev->priv first, leaving a window in which the still-registered
 * device could be used with dangling state (use-after-free).
 */
static void __exit s3c4510_exit(void)
{
	unregister_netdev(&s3c4510_device);
	kfree(s3c4510_device.priv);	/* kfree(NULL) is a no-op; no guard needed */
	s3c4510_device.priv = NULL;
}
/* Module entry/exit registration and metadata. */
module_init(s3c4510_init);
module_exit(s3c4510_exit);

MODULE_DESCRIPTION("Samsung 4510B ethernet driver");
MODULE_AUTHOR("Mac Wang ");
MODULE_LICENSE("GPL");