static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
			      void (*done)(struct scsi_cmnd *),
			      ata_xlat_func_t xlat_func)
{
	struct ata_queued_cmd *qc;
	int is_io = xlat_func == ata_scsi_rw_xlat;

	VPRINTK("ENTER\n");

	if (unlikely(ata_scmd_need_defer(dev, is_io)))
		goto defer;

	qc = ata_scsi_qc_new(dev, cmd, done);
	if (!qc)
		goto err_mem;

	/* data is present; dma-map it */
	if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
	    cmd->sc_data_direction == DMA_TO_DEVICE) {
		if (unlikely(cmd->request_bufflen < 1)) {
			ata_dev_printk(dev, KERN_WARNING,
				       "WARNING: zero len r/w req\n");
			goto err_did;
		}

		/* use the scatterlist directly */
		if (cmd->use_sg)
			ata_sg_init(qc, cmd->request_buffer, cmd->use_sg);
		else
			ata_sg_init_one(qc, cmd->request_buffer,
					cmd->request_bufflen);

		qc->dma_dir = cmd->sc_data_direction;
	}

	qc->complete_fn = ata_scsi_qc_complete;

	if (xlat_func(qc))
		goto early_finish;

	/* select device, send command to hardware */
	ata_qc_issue(qc);

	VPRINTK("EXIT\n");
	return 0;

early_finish:
	ata_qc_free(qc);
	qc->scsidone(cmd);
	DPRINTK("EXIT - early finish (good or error)\n");
	return 0;

err_did:
	ata_qc_free(qc);
	cmd->result = (DID_ERROR << 16);
	qc->scsidone(cmd);
err_mem:
	DPRINTK("EXIT - internal\n");
	return 0;

defer:
	DPRINTK("EXIT - defer\n");
	return SCSI_MLQUEUE_DEVICE_BUSY;
}
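/* For context: ata_scsi_translate() is reached from the SCSI midlayer's
 * ->queuecommand path, which picks a translation function per SCSI opcode.
 * The helper below is a simplified, hypothetical sketch of that dispatch
 * (example_queue_to_translate and the abridged opcode switch are ours;
 * ata_scsi_rw_xlat, atapi_xlat and ata_scsi_simulate are real libata
 * names of this era) -- not verbatim kernel source.
 */
static int example_queue_to_translate(struct ata_device *dev,
				      struct scsi_cmnd *scmd,
				      void (*done)(struct scsi_cmnd *))
{
	ata_xlat_func_t xlat_func = NULL;

	if (dev->class == ATA_DEV_ATA) {
		switch (scmd->cmnd[0]) {
		case READ_10:
		case WRITE_10:
			/* r/w commands take the is_io path seen above */
			xlat_func = ata_scsi_rw_xlat;
			break;
		}
	} else {
		/* ATAPI devices wrap the CDB into a PACKET command */
		xlat_func = atapi_xlat;
	}

	if (xlat_func)
		return ata_scsi_translate(dev, scmd, done, xlat_func);

	/* no translation available: libata emulates the command itself */
	ata_scsi_simulate(dev, scmd, done);
	return 0;
}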
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->flags |= ATA_QCFLAG_SG;
	qc->__sg = sg;
	qc->n_elem = n_elem;
	qc->orig_n_elem = n_elem;
}
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(ap->sactive & (1 << qc->tag));
		ap->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(ap->sactive);
		ap->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
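/* The sactive/qc_active updates above are plain per-tag bitmask accounting:
 * every in-flight command owns one bit of qc_active, NCQ commands also own
 * a bit of sactive, and a non-NCQ command may only issue while sactive is
 * empty. A standalone userspace sketch of that invariant (an illustrative
 * model with our own names, not kernel code):
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct port_model {
	uint32_t sactive;	/* one bit per in-flight NCQ tag */
	uint32_t qc_active;	/* one bit per in-flight tag, NCQ or not */
	int active_tag;		/* the single non-NCQ tag, or -1 */
};

static void model_issue(struct port_model *ap, unsigned int tag, int is_ncq)
{
	if (is_ncq) {
		assert(!(ap->sactive & (1u << tag)));	/* tag must be free */
		ap->sactive |= 1u << tag;
	} else {
		assert(ap->sactive == 0);	/* no NCQ cmds outstanding */
		ap->active_tag = tag;
	}
	ap->qc_active |= 1u << tag;
}

int main(void)
{
	struct port_model ap = { .active_tag = -1 };

	model_issue(&ap, 0, 1);		/* NCQ commands may overlap... */
	model_issue(&ap, 5, 1);
	printf("sactive=%#x qc_active=%#x\n", ap.sactive, ap.qc_active);
	/* model_issue(&ap, 7, 0) would trip the assert: non-NCQ runs alone */
	return 0;
}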
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	/* take the last scatterlist entry */
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;

		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
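/* The padding arithmetic is easy to verify in isolation: pad_len =
 * length & 3 is the number of trailing bytes that leave the transfer
 * short of a 32-bit boundary; trimming them from the last sg element
 * and substituting the 4-byte pad buffer restores alignment. A
 * standalone userspace sketch (illustrative only, not kernel code):
 */
#include <stdio.h>

#define ATA_DMA_PAD_SZ	4	/* same size the kernel's pad buffer uses */

int main(void)
{
	unsigned int lengths[] = { 512, 510, 513, 3 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int len = lengths[i];
		unsigned int pad_len = len & 3;	/* bytes past the boundary */
		unsigned int trimmed = len - pad_len;

		/* The trimmed tail is carried by the 4-byte pad sg entry,
		 * so the total DMA length is 32-bit aligned again; a zero
		 * trimmed length is the trim_sg == 1 case above. */
		printf("len=%u -> trimmed=%u pad_len=%u total=%u\n",
		       len, trimmed, pad_len,
		       trimmed + (pad_len ? ATA_DMA_PAD_SZ : 0));
	}
	return 0;
}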
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			/* fill in the corresponding PRD table entry */
			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* end-of-table marker for the physical region descriptor table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
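/* The 64K split loop is worth tracing once by hand. The standalone
 * userspace sketch below (fill_prds is our name, not kernel code)
 * mirrors the inner loop and prints the PRD entries it would emit for
 * one mapped segment. Note the len & 0xffff store: a full 64 KiB chunk
 * is recorded as 0, which the BMDMA hardware interprets as 65536 bytes.
 */
#include <stdint.h>
#include <stdio.h>

static void fill_prds(uint32_t addr, uint32_t sg_len)
{
	unsigned int idx = 0;

	while (sg_len) {
		uint32_t offset = addr & 0xffff; /* position in 64K window */
		uint32_t len = sg_len;

		if (offset + sg_len > 0x10000)	/* clamp at the 64K boundary */
			len = 0x10000 - offset;

		printf("PRD[%u] = (addr 0x%08x, stored len 0x%04x)\n",
		       idx, addr, len & 0xffff);

		idx++;
		sg_len -= len;
		addr += len;
	}
}

int main(void)
{
	/* a 96 KiB segment starting 0x3000 into a 64K window splits into
	 * a 0xd000-byte PRD followed by a 0xb000-byte PRD */
	fill_prds(0x00103000, 0x18000);
	return 0;
}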