
//#include
//#include
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>
#include <asm/io.h>
 

MODULE_LICENSE("GPL");
#define DRV_NAME "dmaX5"
#define DMA_SIZE 1048576
#define DMAbuf_SIZE 1048576
#define RDELAY 5700
#define WDELAY 5700
/** default xilinx coregen vendor ID
 */
#define XILINX_VID 0x10EE    
/** default xilinx coregen device ID
 */
#define XILINX_DID 0x0007
/** BAR count of pcie core generated by coregen
 */
#define X5_BAR_NUM (1)
/** BAR number where the DMA header sits */
#define X5_BAR_HEADER (0)
 /* obtain the 32 most significant (high) bits of a 32-bit or 64-bit address */
 #define pci_dma_h(addr) ((addr >> 16) >> 16)
 /* obtain the 32 least significant (low) bits of a 32-bit or 64-bit address */
 #define pci_dma_l(addr) (addr & 0xffffffffUL)
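 /* worked example (illustrative, not from the original post): for
  * addr == 0x123456780ULL,
  *   pci_dma_h(addr) == 0x00000001 and pci_dma_l(addr) == 0x23456780;
  * the double 16-bit shift avoids an undefined full 32-bit shift when
  * dma_addr_t happens to be only 32 bits wide */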

#define XC_DEBUG(...)     \
 do {      \
  printk("%s(%d): ", __func__, __LINE__); \
  printk(__VA_ARGS__);   \
  printk("\n");    \
 } while (0)
static const unsigned long bar_min_len[X5_BAR_NUM] = {1};
 
/**
 * DMA controller control registers
 * mapped at BAR 0 to the given address
 * @see xapp1052 p18 ff Appendix A
 */
struct x5pcie_dma_desc{
  u32 dmawas;  //0x00 dma write:endpoint ddr2 address source
  u32 dmawad_L;  //0x04 dma write:system memory add-destination lower
  u32 dmawad_U;  //0x08 dma write:system memory add-dest upper
  u32 dmaras_L;  //0x0c dma read:system memory add-source lower
  u32 dmaras_U;  //0x10 dma read:system memory add-source upper
  u32 dmarad;  //0x14 dma read:endpoint ddr2 address destination
  u32 dmawxs;  //0x18 dma write transfer size
  u32 dmarxs;  //0x1c dma read transfer size
  u32 unused1;
  u32 unused2;
  u32 dmacst;  //dma  control/status reg
  u32 unused3;
  u32 dmawrp;  //dma write transfer counter(read-only)
  u32 dmardp;  //dma read transfer counter (read only) 
} __attribute__ ((packed));
/* the packed attribute forces gcc to place each member immediately after its */
/* predecessor; it changes nothing for a struct made only of u32 fields, but  */
/* it documents that this layout must match the hardware registers exactly   */
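/* layout sanity check (sketch, not in the original driver): the BMD register
 * block above is 14 dwords = 56 bytes, so a guard such as
 *   BUILD_BUG_ON(sizeof(struct x5pcie_dma_desc) != 56);
 * placed at the top of probe() would catch accidental compiler padding */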

/**
 * xilinx virtex5 pci express board bookkeeping data
 */
struct x5pcie_dev{
  /** the kernel pci device data structure */
  struct pci_dev *pci_dev;
  /** kernel virtual addr. for the mapped BARs */
  void __iomem *bar[X5_BAR_NUM];
  /** kernel virtual addr. for the DMA buffer in root complex memory */
  void *dmaBuf_va;
  /**physical bus address for the DMA Buffer
   * CPU-native endianness */
  dma_addr_t dmaBuf_pba;
  /* if the device regions could not be allocated, assume and remember it
   * is in use by another driver; this driver must not disable the device. */
  int in_use;
  /* whether this driver could obtain the regions, 0 otherwise */
  int got_regions;
  /* irq line successfully requested by this driver, -1 otherwise */
  int irq_line;
  /* board revision */
  u8 revision; 
 
  /* character device */
  dev_t cdevno;
  struct cdev cdev;
   
};
static void printX5BMD_regs(struct x5pcie_dma_desc *desc_table)
{
  XC_DEBUG(KERN_DEBUG "X5 BMD reg dump \n ===================================");
  XC_DEBUG(KERN_DEBUG "dmawas:           0x%08x", le32_to_cpu(readl(&desc_table->dmawas)));
  XC_DEBUG(KERN_DEBUG "dmawad_L:         0x%08x", le32_to_cpu(readl(&desc_table->dmawad_L)));
  XC_DEBUG(KERN_DEBUG "dmawad_U:         0x%08x", be32_to_cpu(readl(&desc_table->dmawad_U)));
  XC_DEBUG(KERN_DEBUG "dmaras_L:         0x%08x", le32_to_cpu(readl(&desc_table->dmaras_L)));
  XC_DEBUG(KERN_DEBUG "dmaras_U:         0x%08x", le32_to_cpu(readl(&desc_table->dmaras_U)));
  XC_DEBUG(KERN_DEBUG "dmarad:           0x%08x", le32_to_cpu(readl(&desc_table->dmarad)));
  XC_DEBUG(KERN_DEBUG "dmawxs:         0x%08x", le32_to_cpu(readl(&desc_table->dmawxs)));
  XC_DEBUG(KERN_DEBUG "dmarxs:         0x%08x", le32_to_cpu(readl(&desc_table->dmarxs)));
  XC_DEBUG(KERN_DEBUG "dmacst:         0x%08x", le32_to_cpu(readl(&desc_table->dmacst)));
  XC_DEBUG(KERN_DEBUG "dmawrp:          0x%08x", le32_to_cpu(readl(&desc_table->dmawrp)));
  XC_DEBUG(KERN_DEBUG "dmardp:           0x%08x", le32_to_cpu(readl(&desc_table->dmardp)));
 
}
/* prototypes for character device */
 static int x5_init(struct x5pcie_dev *x5pcie);
 static void x5_exit(struct x5pcie_dev *x5pcie);
/*
static void __devinit calcWriteDmaBW(struct x5pcie_dev *x5pcie, struct pci_dev *dev)
{
  struct x5pcie_dma_desc *desc_table = (struct x5pcie_dma_desc *)x5pcie->bar[X5_BAR_HEADER];
  u32 overallData;
  u32 bandwidth;
  overallData = readl(&desc_table->dmawrp) * 4 ;
  bandwidth = (overallData * 8 * 1000) / (32 * readl(&desc_table->dmawrp));
*/
/*
  XC_DEBUG(KERN_DEBUG "X5 BMD Write Performace \n ===================================");
  XC_DEBUG(KERN_DEBUG "TLP Size[DWORDS]:               %u",readl(&desc_table->wdmatlps));
  XC_DEBUG(KERN_DEBUG "TLPs to transfer:               %u",readl(&desc_table->wdmatlpc));
  XC_DEBUG(KERN_DEBUG "Overall Data Transferd [bytes]: %u",overallData);
  XC_DEBUG(KERN_DEBUG "Clock Cycle Count[32ns]:        %u",readl(&desc_table->wdmx5pcierf));
  XC_DEBUG(KERN_DEBUG "Throughput [Mbps]:              %u",bandwidth);
 */ 
//}
/*
static void __devinit calcReadDmaBW(struct x5pcie_dev *x5pcie, struct pci_dev *dev)
{
  struct x5pcie_dma_desc *desc_table = (struct x5pcie_dma_desc *)x5pcie->bar[X5_BAR_HEADER];
  u32 overallData;
  u32 bandwidth;
  overallData = readl(&desc_table->dmardp) * 4;
  bandwidth = (overallData * 8 * 1000) / (32 * readl(&desc_table->dmardp));
*/
/*
  XC_DEBUG(KERN_DEBUG "X5 BMD Read Performace \n ===================================");
  XC_DEBUG(KERN_DEBUG "TLP Size[DWORDS]:               %u",readl(&desc_table->rdmatlps));
  XC_DEBUG(KERN_DEBUG "TLPs to transfer:               %u",readl(&desc_table->rdmatlpc));
  XC_DEBUG(KERN_DEBUG "Overall Data Transferd [bytes]: %u",overallData);
  XC_DEBUG(KERN_DEBUG "Clock Cycle Count[32ns]:        %u",readl(&desc_table->rdmx5pcierf));
  XC_DEBUG(KERN_DEBUG "Throughput [Mbps]:              %u",bandwidth);
 */ 
//}
/*
static void __devinit dma_test(struct x5pcie_dev *x5pcie, struct pci_dev *dev)
{
  struct x5pcie_dma_desc *desc_table = (struct x5pcie_dma_desc *)x5pcie->bar[X5_BAR_HEADER];
  int i;
 
 // modify DMA buffer
  for(i = 0; i < 512; i++){
 ((u32*)x5pcie->dmaBuf_va)[i] = 0xBEEFFEEDUL;
  }
 //wait until the memory phy layer is initialized
  while(readl(&desc_table->dmacst) & 0x20UL);
 
  */
 
/*  -------------------------------------------------------------*/
  /* DMA read to RootComplex Mem from system memory*/
  /*
//  writel(cpu_to_le32(0xBEEFFEEDUL),&desc_table->rdmatlpp);
  writel(cpu_to_le32(pci_dma_l(x5pcie->dmaBuf_pba)),&desc_table->dmaras_L);  // lower 32-bits of physical bus address of DMA able buffer
  writel(cpu_to_le32(pci_dma_h(x5pcie->dmaBuf_pba)),&desc_table->dmaras_U);  // upper 32-bits of physical bus address of DMA able buffer
  writel(cpu_to_le32(0x20UL),&desc_table->dmarxs);              // read DMA transfer size in bytes
  writel(cpu_to_le32(0x00UL),&desc_table->dmarad);    //destination address of endpoint ddr2 memory
 
  writel(cpu_to_le32(0x04UL),&desc_table->dmacst);                // set the read-dma start bit (0x04) in dmacst
  wmb();
  while(!(readl(&desc_table->dmacst) & 0x08UL));
  XC_DEBUG(KERN_DEBUG "DMA read complete");
  calcReadDmaBW(x5pcie,dev);
  XC_DEBUG(KERN_DEBUG "first and last 2 WORDS of the buffer");
  for(i = 0; i < 2; i++){
 XC_DEBUG(KERN_DEBUG "%d : 0x%08x",i,((u32*)x5pcie->dmaBuf_va)[i]);
  }
  for(i = 510; i < 512; i++){
 XC_DEBUG(KERN_DEBUG "%d : 0x%08x",i,((u32*)x5pcie->dmaBuf_va)[i]);
  }
*/
/*  -------------------------------------------------------------*/
  /* DMA write from endpoint ddr2 Mem to system memory*/ 
/* 
  writel(cpu_to_le32(((x5pcie->dmaBuf_pba) + 0x20UL)),(&desc_table->dmawad_L));  // lower 32-bits of physical bus address of DMA able buffer
 // writel(cpu_to_le32(((x5pcie->dmaBuf_pba) + 0x20UL)),(&desc_table->dmawad_U));  // upper 32-bits of physical bus address of DMA able buffer
  writel(cpu_to_le32(0x20UL),&desc_table->dmawxs);              // write DMA transfer size in bytes
  writel(cpu_to_le32(0x00UL),&desc_table->dmawas);    //source address of endpoint ddr2 memory
 
  writel(cpu_to_le32(0x01UL),&desc_table->dmacst);                // set the write-dma start bit (0x01) in dmacst
  // now the dma transfer should be running, wait for interrupt
  wmb();
  while(!(readl(&desc_table->dmacst) & 0x01UL));
  XC_DEBUG(KERN_DEBUG "DMA write complete");
  //printX5BMD_regs(desc_table);
 
  calcWriteDmaBW(x5pcie,dev);
  for(i =0;i<32;i++)
  {
   if(readl(x5pcie->dmaBuf_va) != readl(x5pcie->dmaBuf_va +0x20UL))
  XC_DEBUG(KERN_DEBUG " error in transfer");
  
 else { x5pcie->dmaBuf_va++;}
  }
  printX5BMD_regs(desc_table);
}
 
*/
static irqreturn_t x5pciedma_irq(int irq, void *dev_id)
{
  struct x5pcie_dev *x5pcie = (struct x5pcie_dev *)dev_id;
  XC_DEBUG(KERN_DEBUG DRV_NAME "\n\n\n\nIRQ\n\n\n\n");
  if (!x5pcie)
 return IRQ_NONE;
  return IRQ_HANDLED;
}
static int __devinit scan_bars(struct x5pcie_dev *x5pcie, struct pci_dev *dev)
{
  int i;
  for (i = 0; i < X5_BAR_NUM; i++) {
 unsigned long bar_start = pci_resource_start(dev, i);
 if (bar_start) {
   unsigned long bar_end = pci_resource_end(dev, i);
   unsigned long bar_flags = pci_resource_flags(dev, i);
   XC_DEBUG(KERN_DEBUG "BAR%d 0x%08lx-0x%08lx flags 0x%08lx",
    i, bar_start, bar_end, bar_flags);
 }
  }
  return 0;
}

/**
 * Map the device memory regions into kernel virtual address space,
 * after verifying that each BAR is at least as large as the minimum
 * given by the bar_min_len[] array.
 */
static int __devinit map_bars(struct x5pcie_dev *x5pcie, struct pci_dev *dev)
{
  int i;
  for (i = 0; i < X5_BAR_NUM; i++){
 unsigned long bar_start = pci_resource_start(dev, i);
 unsigned long bar_end = pci_resource_end(dev, i);
 unsigned long bar_length = bar_end - bar_start + 1;
 
 /* do not map BARs with length 0 */
 if (!bar_min_len[i]) continue;
 
 if (!bar_start || !bar_end) {
   XC_DEBUG(KERN_DEBUG "BAR #%d is not present?!", i);
   return -1;
 }
 if (bar_length < bar_min_len[i]) {
   XC_DEBUG(KERN_DEBUG "BAR #%d length = %lu bytes but driver requires at least %lu bytes",
      i, bar_length, bar_min_len[i]);
   return -1;
 }
 /* map the device memory or IO region into kernel virtual
  * address space */ 
 x5pcie->bar[i] = ioremap(bar_start,bar_length);
 
 if (!x5pcie->bar[i]) {
   XC_DEBUG(KERN_DEBUG "Could not map BAR #%d.", i);
   return -1;
 }
 
 XC_DEBUG(KERN_DEBUG "BAR[%d] mapped at 0x%p with length %lu.", i,
    x5pcie->bar[i], bar_length);
 }
  return 0;
}

/**
 * Free the BAR regions that were mapped earlier using map_bars()
 */
static void free_bars(struct x5pcie_dev *x5pcie, struct pci_dev *dev)
{
  int i;
  for (i = 0; i < X5_BAR_NUM; i++) {
 if (x5pcie->bar[i]) {
   iounmap(x5pcie->bar[i]);  /* pairs with the ioremap() in map_bars() */
   x5pcie->bar[i] = NULL;
 }
  }
}
 
 
/**
 * Called when the PCI sub system thinks we can control the given device.
 * Inspect if we can support the device and if so take control of it.
 *
 * @return 0 when we have taken control of the given device.
 *
 * - allocate board specific bookkeeping
 * - allocate coherently-mapped memory for the DMA buffer
 * - enable the board
 * - verify board revision
 * - request regions
 * - query DMA mask
 * - obtain and request irq
 * - map regions into kernel address space
 */
static int __devinit probe(struct pci_dev *dev, const struct pci_device_id *id)
{
  int rc = 0;
  struct x5pcie_dev *x5pcie = NULL;
  u8 irq_pin, irq_line;
 
  XC_DEBUG(KERN_DEBUG "probe(dev = 0x%p, pciid = 0x%p)", dev, id);
  /*   allocate mem for bookkeeping data */
  x5pcie = kzalloc(sizeof(struct x5pcie_dev), GFP_KERNEL);
 
  if(!x5pcie){
 XC_DEBUG(KERN_DEBUG "Could not allocate bookkeeping memory.");
 rc = -ENOMEM;
 goto err_kzalloc;
  }
  x5pcie->pci_dev = dev;
  dev->dev.driver_data = (void *)x5pcie;
 
  /* allocate coherently-mapped memory for the DMA buffer */
  /* @see LDD3 page 446 */
  XC_DEBUG(KERN_DEBUG "sizeof(struct x5pcie_dma_desc) = %d.",
     (int)sizeof(struct x5pcie_dma_desc));
  XC_DEBUG(KERN_DEBUG "DMAbuf_SIZE: %d",(int)DMAbuf_SIZE);
 
  x5pcie->dmaBuf_va = pci_alloc_consistent(dev,DMAbuf_SIZE ,&x5pcie->dmaBuf_pba);
  if (!x5pcie->dmaBuf_va) {
 XC_DEBUG(KERN_DEBUG "Could not dma_alloc()ate_coherent memory.");
 goto err_table;
  }
  XC_DEBUG(KERN_DEBUG "dmaBuf_va = 0x%p, dmaBuf_pba = 0x%p.",
   (void *)x5pcie->dmaBuf_va, (void *)x5pcie->dmaBuf_pba);
  /* enable device */
  rc = pci_enable_device(dev);
  if (rc) {
 XC_DEBUG(KERN_DEBUG "pci_enable_device() failed");
 goto err_enable;
  }
  /* enable bus master capability on device */
  pci_set_master(dev);
 
  rc = pci_request_regions(dev, DRV_NAME);
  /* could not request all regions? */
  if (rc) {
 /* assume device is in use (and do not disable it later!) */
 x5pcie->in_use = 1;
 goto err_regions;
  }
  x5pcie->got_regions = 1;
 
  /*   query DMA mask, see DMA-mapping.txt l110 */
  if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)) {
 XC_DEBUG(KERN_DEBUG "Using a 64-bit DMA mask.");
 pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK);
  } else if (!pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
 XC_DEBUG(KERN_DEBUG "Using a 32-bit DMA mask.");
 pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK);
  } else {
 XC_DEBUG(KERN_WARNING
     "x5dma: No suitable DMA available.");
 goto err_mask;
  }

  /* enable message signaled interrupts, returns 0 on success   */
  rc = pci_enable_msi(dev);
  if(rc){
 XC_DEBUG(KERN_DEBUG "could not enable MSI, falling back to legacy interrupts");
  }
  rc = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq_pin);
  /* could not read? */
  if (rc)
 goto err_irq;
  XC_DEBUG(KERN_DEBUG "IRQ pin #%d (0=none, 1=INTA#...4=INTD#).", irq_pin);
  /* @see LDD3, page 318 */
  rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq_line);
  /* could not read? */
  if (rc) {
 XC_DEBUG(KERN_DEBUG "Could not query PCI_INTERRUPT_LINE, error %d", rc);
 goto err_irq;
  }
  XC_DEBUG(KERN_DEBUG "IRQ line #%d.", irq_line);
  if (!irq_line) {
 /** @todo Choose proper error code */
 rc = -1;
 goto err_irq;
  }
  /* @see LDD3, page 259 */
  rc = request_irq(irq_line, x5pciedma_irq, IRQF_SHARED | IRQF_DISABLED, DRV_NAME, (void *)x5pcie);
  if (rc) {
 XC_DEBUG(KERN_DEBUG "Could not request IRQ #%d, error %d", irq_line, rc);
 x5pcie->irq_line = -1;
 goto err_irq;
  }

  /* remember which irq we allocated */
  x5pcie->irq_line = (int)irq_line;
  XC_DEBUG(KERN_DEBUG "Succesfully requested IRQ #%d with dev_id 0x%p", irq_line, x5pcie);
  scan_bars(x5pcie, dev); 
  rc = map_bars(x5pcie, dev);
  if (rc)
 goto err_map;
 
 /* initialize character device */
  rc = x5_init(x5pcie);
  if (rc)
    goto err_cdev;
 
//  dma_test(x5pcie,dev);
 
  /* successfully took the device */
  rc = 0;
  XC_DEBUG(KERN_DEBUG "probe() successful.");
  printX5BMD_regs((struct x5pcie_dma_desc *)x5pcie->bar[X5_BAR_HEADER]);
 
  goto end;
 err_cdev:
  /* unmap the BARs */
  free_bars(x5pcie, dev);
 err_map:
  /* free allocated irq */
  if (x5pcie->irq_line >= 0){
 pci_disable_msi(dev);
 free_irq(x5pcie->irq_line, (void *)x5pcie);
  }
 err_irq:
 err_mask:
  /* disable the device iff it is not in use */
  if (!x5pcie->in_use)
 pci_disable_device(dev);
  if (x5pcie->got_regions)
 pci_release_regions(dev);
 err_regions:
 err_enable:
  /* clean up everything allocated before pci_enable_device() */
  if (x5pcie->dmaBuf_va)
 pci_free_consistent(dev, DMAbuf_SIZE, x5pcie->dmaBuf_va, x5pcie->dmaBuf_pba);
 err_table:
  kfree(x5pcie);
 err_kzalloc:
 end:
  return rc;

}

static void __devexit remove(struct pci_dev *dev)
{
  struct x5pcie_dev *x5pcie;
  XC_DEBUG(KERN_DEBUG "remove(0x%p)", dev);
  if ((dev == NULL) || (pci_get_drvdata(dev) == NULL)) {
 XC_DEBUG(KERN_DEBUG "remove(dev = 0x%p): no driver data", dev);
 return;
  }
  x5pcie = (struct x5pcie_dev *)pci_get_drvdata(dev);
  XC_DEBUG(KERN_DEBUG "remove(dev = 0x%p) where driver data = 0x%p", dev, x5pcie);
  if (x5pcie->pci_dev != dev) {
 XC_DEBUG(KERN_DEBUG "driver data pci_dev (0x%08lx) != dev (0x%08lx)",
    (unsigned long)x5pcie->pci_dev, (unsigned long)dev);
  }
  /* remove the character device first so no new I/O can be started */
  x5_exit(x5pcie);
 
  /* free IRQ */
  if (x5pcie->irq_line >= 0) {
 XC_DEBUG(KERN_DEBUG "Freeing IRQ line #%d for dev_id 0x%08lx.",
     x5pcie->irq_line, (unsigned long)x5pcie);
 pci_disable_msi(dev);
 free_irq(x5pcie->irq_line, (void *)x5pcie);
  }
  /* unmap the BARs */
  free_bars(x5pcie, dev);
  /* free the DMA buffer only after the device can no longer access it */
  if (x5pcie->dmaBuf_va)
 pci_free_consistent(dev, DMAbuf_SIZE, x5pcie->dmaBuf_va, x5pcie->dmaBuf_pba);
  if (!x5pcie->in_use)
 pci_disable_device(dev);
  if (x5pcie->got_regions)
 /* to be called after device disable */
 pci_release_regions(dev);
  /* free the bookkeeping structure allocated in probe() */
  kfree(x5pcie);
}
static int x5_open(struct inode *inode, struct file *file)
 {
         struct x5pcie_dev *x5pcie;
         printk(KERN_DEBUG DRV_NAME "_open()\n");
         /* pointer to containing data structure of the character device inode */
         x5pcie = container_of(inode->i_cdev, struct x5pcie_dev, cdev);
         // create a reference to our device state in the opened file
         file->private_data = x5pcie;
         return 0;
 }
 /*
  * Called when the device goes from used to unused.
  */
 static int x5_close(struct inode *inode, struct file *file)
 {
         struct x5pcie_dev *x5pcie = (struct x5pcie_dev *)file->private_data;
         struct x5pcie_dma_desc *desc_table = (struct x5pcie_dma_desc *)x5pcie->bar[X5_BAR_HEADER];
         printX5BMD_regs(desc_table);
  printk(KERN_DEBUG DRV_NAME "_close()\n");
         return 0;
 }
 
/*
 * Note on direction: the BMD core's "read" DMA makes the endpoint read from
 * system memory, so read() on this device transfers user data into the
 * FPGA's DDR2 memory.
 */
static ssize_t x5_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
   unsigned int act_count = 0, trans_size = 0, trans_bytes = 0;
   unsigned int ddr2_dest = 0, init_count;
   dma_addr_t dmabuf_src;
   u32 read_start;
   /* fetch device specific data stored earlier during open */
   struct x5pcie_dev *x5pcie = (struct x5pcie_dev *)file->private_data;
   struct x5pcie_dma_desc *desc_table = (struct x5pcie_dma_desc *)x5pcie->bar[X5_BAR_HEADER];

   /* never copy more than the DMA buffer can hold */
   if (count > DMAbuf_SIZE)
  count = DMAbuf_SIZE;

   act_count = copy_from_user(x5pcie->dmaBuf_va, buf, count); /* returns bytes NOT copied */
   init_count = readl(&desc_table->dmardp);
   trans_size = count - act_count;
   dmabuf_src = x5pcie->dmaBuf_pba;
   while (trans_size > 0) {
  if (trans_size <= DMA_SIZE)
    trans_bytes = trans_size;
  else
    trans_bytes = DMA_SIZE;

  writel(cpu_to_le32(pci_dma_l(dmabuf_src)), &desc_table->dmaras_L); /* lower 32 bits of the DMA buffer bus address */
  writel(cpu_to_le32(0x00UL), &desc_table->dmaras_U);                /* upper 32 bits of the DMA buffer bus address */
  writel(cpu_to_le32(trans_bytes), &desc_table->dmarxs);             /* read DMA transfer size in bytes */
  writel(cpu_to_le32(ddr2_dest), &desc_table->dmarad);               /* destination address in endpoint ddr2 memory */

  read_start = readl(&desc_table->dmacst);
  read_start |= 0x04UL;                                              /* set the read-dma start bit in dmacst */
  writel(read_start, &desc_table->dmacst);
  wmb();

  udelay(RDELAY);                                                    /* crude completion wait instead of polling or an IRQ */
  XC_DEBUG(KERN_DEBUG "DMA read complete");
  ddr2_dest += trans_bytes;
  dmabuf_src += trans_bytes;
  trans_size -= trans_bytes;
   }

   trans_bytes = readl(&desc_table->dmardp) - init_count;
   XC_DEBUG(KERN_DEBUG "read packets transferred from system memory -> fpga = %d\n", trans_bytes);

   return (count - act_count);
}
 
/*
 * Note on direction: the BMD core's "write" DMA makes the endpoint write to
 * system memory, so write() on this device transfers data from the FPGA's
 * DDR2 memory back into the user buffer.
 */
static ssize_t x5_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
   unsigned int act_count = 0, trans_size = 0, trans_bytes = 0;
   unsigned int ddr2_src = 0, init_count;
   dma_addr_t dmabuf_dest;
   u32 write_start;
   /* fetch device specific data stored earlier during open */
   struct x5pcie_dev *x5pcie = (struct x5pcie_dev *)file->private_data;
   struct x5pcie_dma_desc *desc_table = (struct x5pcie_dma_desc *)x5pcie->bar[X5_BAR_HEADER];

   /* never transfer more than the DMA buffer can hold */
   if (count > DMAbuf_SIZE)
  count = DMAbuf_SIZE;

   init_count = readl(&desc_table->dmawrp);
   trans_size = count;
   dmabuf_dest = x5pcie->dmaBuf_pba;
   while (trans_size > 0) {
  if (trans_size <= DMA_SIZE)
    trans_bytes = trans_size;
  else
    trans_bytes = DMA_SIZE;

  writel(cpu_to_le32(pci_dma_l(dmabuf_dest)), &desc_table->dmawad_L); /* lower 32 bits of the DMA buffer bus address */
  writel(cpu_to_le32(0x00UL), &desc_table->dmawad_U);                 /* upper 32 bits of the DMA buffer bus address */
  writel(cpu_to_le32(trans_bytes), &desc_table->dmawxs);              /* write DMA transfer size in bytes */
  writel(cpu_to_le32(ddr2_src), &desc_table->dmawas);                 /* source address in endpoint ddr2 memory */

  write_start = readl(&desc_table->dmacst);
  write_start |= 0x01UL;                                              /* set the write-dma start bit in dmacst */
  writel(write_start, &desc_table->dmacst);
  wmb();
  udelay(WDELAY);                                                     /* crude completion wait instead of polling or an IRQ */
  ddr2_src += trans_bytes;
  dmabuf_dest += trans_bytes;
  trans_size -= trans_bytes;
   }

   trans_bytes = readl(&desc_table->dmawrp) - init_count;
   XC_DEBUG(KERN_DEBUG "write packets transferred from fpga -> system memory = %d\n", trans_bytes);
   /* the inverted direction means write() copies data out to user space */
   act_count = copy_to_user((void __user *)buf, x5pcie->dmaBuf_va, count); /* returns bytes NOT copied */
   return (count - act_count);
}
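/*
 * A minimal user-space sketch (illustrative, not part of the original post)
 * of the inverted read()/write() semantics above. The device node name is an
 * assumption; create the node with mknod using the major number printed by
 * x5_init():
 *
 *   #include <fcntl.h>
 *   #include <string.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *           char buf[4096];
 *           int fd = open("/dev/dmaX5", O_RDWR);  // assumed node name
 *           if (fd < 0)
 *                   return 1;
 *           memset(buf, 0xAB, sizeof(buf));
 *           read(fd, buf, sizeof(buf));           // host buffer -> FPGA DDR2
 *           write(fd, buf, sizeof(buf));          // FPGA DDR2 -> host buffer
 *           close(fd);
 *           return 0;
 *   }
 */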

static struct pci_device_id dmaX5_ids[] = {
 { PCI_DEVICE(XILINX_VID, XILINX_DID) },
 { 0 },
};
MODULE_DEVICE_TABLE(pci, dmaX5_ids);
/**
 * used to register the driver with the PCI kernel sub system
 * @see LDD3 page 311
 */
static struct pci_driver pci_driver = {
 .name = DRV_NAME,
 .id_table = dmaX5_ids,
 .probe = probe,
 .remove = remove,
 /* resume, suspend are optional */
};
static struct file_operations x5_fops = {
   .owner = THIS_MODULE,
   .open = x5_open,
   .release = x5_close,
   .read = x5_read,
   .write = x5_write,
 };

/**
 * Set up the character device for one board: allocate a device node
 * number and register the file operations with the kernel.
 */
static int x5_init(struct x5pcie_dev *x5pcie)
 {
         int rc;
         printk(KERN_DEBUG DRV_NAME " _nit()\n");
         /* allocate a dynamically allocated character device node */
         rc = alloc_chrdev_region(&x5pcie->cdevno, 0/*requested minor*/, 1/*count*/, DRV_NAME);
         /* allocation failed? */
         if (rc < 0) {
                 printk("alloc_chrdev_region() = %d\n", rc);
                 goto fail_alloc;
         }
         /* couple the device file operations to the character device */
         cdev_init(&x5pcie->cdev, &x5_fops);
         x5pcie->cdev.owner = THIS_MODULE;
         /* bring character device live */
         rc = cdev_add(&x5pcie->cdev, x5pcie->cdevno, 1/*count*/);
         if (rc < 0) {
                 printk("cdev_add() = %d\n", rc);
                 goto fail_add;
         }
         printk(KERN_DEBUG "x5pcie = %d:%d\n", MAJOR(x5pcie->cdevno), MINOR(x5pcie->cdevno));
         return 0;
 fail_add:
         /* free the dynamically allocated character device node */
     unregister_chrdev_region(x5pcie->cdevno, 1/*count*/);
 fail_alloc:
         return rc;
 }
static void x5_exit(struct x5pcie_dev *x5pcie)
 {
         printk(KERN_DEBUG DRV_NAME " _exit()\n");
         /* remove the character device */
         cdev_del(&x5pcie->cdev);
         /* free the dynamically allocated character device node */
         unregister_chrdev_region(x5pcie->cdevno, 1/*count*/);
 }

/**
 * Initialize the driver module (but not any device) and register
 * the module with the kernel PCI subsystem.
 */
static int __init dmaX5_init(void)
{
 XC_DEBUG(KERN_DEBUG "init() ---");
 /* register this driver with the PCI bus driver */
 return pci_register_driver(&pci_driver);
}
static void __exit dmaX5_exit(void)
{
 XC_DEBUG(KERN_DEBUG "exit");
 /* unregister this driver from the PCI bus driver */
 pci_unregister_driver(&pci_driver);
}

module_init(dmaX5_init);
module_exit(dmaX5_exit);
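/*
 * Build and load sketch (assumed out-of-tree Kbuild usage; the file name
 * dmaX5.c and the exact commands are assumptions, not from the original post):
 *
 *   # Makefile
 *   obj-m := dmaX5.o
 *
 *   $ make -C /lib/modules/$(uname -r)/build M=$(pwd) modules
 *   $ insmod ./dmaX5.ko
 *   $ mknod /dev/dmaX5 c <major printed by x5_init> 0
 */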