Category: LINUX

2012-11-23 00:02:54

Last time we covered the initialization that takes place before the scsi_error_handler thread starts running; ata_host_register() in libata-core.c waits for this thread to do its work. Let's now pick up execution from the thread itself:
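Before walking through the loop, it helps to recall (roughly, from memory — this is a paraphrased sketch, not a verbatim quote of the kernel source) where this thread comes from and who wakes it up: scsi_host_alloc() starts one such thread per host with kthread_run(), and scsi_eh_wakeup() wakes it only once every command still owned by the host has been marked failed:

/* Condensed sketch, paraphrased from drivers/scsi/hosts.c:
 * scsi_host_alloc() spawns the per-host error handler thread. */
shost->ehandler = kthread_run(scsi_error_handler, shost,
                              "scsi_eh_%d", shost->host_no);

/* Paraphrased from drivers/scsi/scsi_error.c: scsi_eh_wakeup() wakes the
 * thread only when host_busy == host_failed, i.e. when every outstanding
 * command has failed, so the handler has the host to itself. */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
    if (shost->host_busy == shost->host_failed) {
        wake_up_process(shost->ehandler);
        SCSI_LOG_ERROR_RECOVERY(5,
                printk("Waking error handler thread\n"));
    }
}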

linux/drivers/scsi/scsi_error.c
/**
 * scsi_error_handler - SCSI error handler thread
 * @data:    Host for which we are running.
 *
 * Notes:
 *    This is the main error handling loop.  This is run as a kernel thread
 *    for every SCSI host and handles all error handling activity.
 **/
int scsi_error_handler(void *data)
{
    struct Scsi_Host *shost = data;

    /*
     * We use TASK_INTERRUPTIBLE so that the thread is not
     * counted against the load average as a running process.
     * We never actually get interrupted because kthread_run
     * disables signal delivery for the created thread.
     */
    set_current_state(TASK_INTERRUPTIBLE);
    while (!kthread_should_stop()) {
        if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
            shost->host_failed != shost->host_busy) {
            SCSI_LOG_ERROR_RECOVERY(1,
                printk("Error handler scsi_eh_%d sleeping\n",
                    shost->host_no));
            schedule();
            set_current_state(TASK_INTERRUPTIBLE);
            continue;
        }

        __set_current_state(TASK_RUNNING);
        SCSI_LOG_ERROR_RECOVERY(1,
            printk("Error handler scsi_eh_%d waking up\n",
                shost->host_no));

        /*
         * We have a host that is failing for some reason.  Figure out
         * what we need to do to get it up and online again (if we can).
         * If we fail, we end up taking the thing offline.
         */
        if (shost->transportt->eh_strategy_handler)
            shost->transportt->eh_strategy_handler(shost);
        else
            scsi_unjam_host(shost);

        /*
         * Note - if the above fails completely, the action is to take
         * individual devices offline and flush the queue of any
         * outstanding requests that may have been pending.  When we
         * restart, we restart any I/O to any other devices on the bus
         * which are still online.
         */
        scsi_restart_operations(shost);
        set_current_state(TASK_INTERRUPTIBLE);
    }
    __set_current_state(TASK_RUNNING);

    SCSI_LOG_ERROR_RECOVERY(1,
        printk("Error handler scsi_eh_%d exiting\n", shost->host_no));
    shost->ehandler = NULL;
    return 0;
}

shost->transportt->eh_strategy_handler(shost);
This is a call through a function pointer. An earlier article in this series already covered in detail where this pointer comes from; here let's look at where it gets its value:

linux/drivers/ata/libata-scsi.c

/*
 * libata transport template.  libata doesn't do real transport stuff.
 * It just needs the eh_timed_out hook.
 */
static struct scsi_transport_template ata_scsi_transport_template = {
    .eh_strategy_handler    = ata_scsi_error,
    .eh_timed_out        = ata_scsi_timed_out,
    .user_scan        = ata_scsi_user_scan,
};

This template is handed up to the SCSI layer in ata_scsi_add_hosts(), which the previous article in this series touched on; refer to that post for the details.
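For reference, here is a simplified paraphrase (not a verbatim quote) of the relevant part of ata_scsi_add_hosts() in libata-scsi.c: one Scsi_Host is allocated per ATA port and its transportt pointer is aimed at the template above, which is how eh_strategy_handler ends up being ata_scsi_error for libata hosts:

/* Simplified sketch of ata_scsi_add_hosts(); error unwinding and the
 * various shost limits (max_id, max_lun and friends) are omitted. */
int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
{
    int i, rc;

    for (i = 0; i < host->n_ports; i++) {
        struct ata_port *ap = host->ports[i];
        struct Scsi_Host *shost;

        shost = scsi_host_alloc(sht, sizeof(struct ata_port *));
        if (!shost)
            return -ENOMEM;

        *(struct ata_port **)&shost->hostdata[0] = ap;
        ap->scsi_host = shost;

        /* this is the assignment that installs ata_scsi_error as the
         * SCSI midlayer's eh_strategy_handler for this host */
        shost->transportt = &ata_scsi_transport_template;

        rc = scsi_add_host(shost, ap->host->dev);
        if (rc)
            return rc;
    }
    return 0;
}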

Next, let's look at where ata_scsi_error comes from:

linux/drivers/ata/libata-eh.c
/**
 *    ata_scsi_error - SCSI layer error handler callback
 *    @host: SCSI host on which error occurred
 *
 *    Handles SCSI-layer-thrown error events.
 *
 *    LOCKING:
 *    Inherited from SCSI layer (none, can sleep)
 *
 *    RETURNS:
 *    Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
    struct ata_port *ap = ata_shost_to_port(host);
    int i;
    unsigned long flags;

    DPRINTK("ENTER\n");

    /* synchronize with port task */
    ata_port_flush_task(ap);

    /* synchronize with host lock and sort out timeouts */

    /* For new EH, all qcs are finished in one of three ways -
     * normal completion, error completion, and SCSI timeout.
     * Both completions can race against SCSI timeout.  When normal
     * completion wins, the qc never reaches EH.  When error
     * completion wins, the qc has ATA_QCFLAG_FAILED set.
     *
     * When SCSI timeout wins, things are a bit more complex.
     * Normal or error completion can occur after the timeout but
     * before this point.  In such cases, both types of
     * completions are honored.  A scmd is determined to have
     * timed out iff its associated qc is active and not failed.
     */
    if (ap->ops->error_handler) {
        struct scsi_cmnd *scmd, *tmp;
        int nr_timedout = 0;

        spin_lock_irqsave(ap->lock, flags);

        list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
            struct ata_queued_cmd *qc;

            for (i = 0; i < ATA_MAX_QUEUE; i++) {
                qc = __ata_qc_from_tag(ap, i);
                if (qc->flags & ATA_QCFLAG_ACTIVE &&
                    qc->scsicmd == scmd)
                    break;
            }

            if (i < ATA_MAX_QUEUE) {
                /* the scmd has an associated qc */
                if (!(qc->flags & ATA_QCFLAG_FAILED)) {
                    /* which hasn't failed yet, timeout */
                    qc->err_mask |= AC_ERR_TIMEOUT;
                    qc->flags |= ATA_QCFLAG_FAILED;
                    nr_timedout++;
                }
            } else {
                /* Normal completion occurred after
                 * SCSI timeout but before this point.
                 * Successfully complete it.
                 */
                scmd->retries = scmd->allowed;
                scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
            }
        }

        /* If we have timed out qcs.  They belong to EH from
         * this point but the state of the controller is
         * unknown.  Freeze the port to make sure the IRQ
         * handler doesn't diddle with those qcs.  This must
         * be done atomically w.r.t. setting QCFLAG_FAILED.
         */
        if (nr_timedout)
            __ata_port_freeze(ap);

        spin_unlock_irqrestore(ap->lock, flags);

        /* initialize eh_tries */
        ap->eh_tries = ATA_EH_MAX_TRIES;
    } else
        spin_unlock_wait(ap->lock);

 repeat:
    /* invoke error handler */
    if (ap->ops->error_handler) {
        struct ata_link *link;

        /* kill fast drain timer */
        del_timer_sync(&ap->fastdrain_timer);

        /* process port resume request */
        ata_eh_handle_port_resume(ap);

        /* fetch & clear EH info */
        spin_lock_irqsave(ap->lock, flags);

        __ata_port_for_each_link(link, ap) {
            memset(&link->eh_context, 0, sizeof(link->eh_context));
            link->eh_context.i = link->eh_info;
            memset(&link->eh_info, 0, sizeof(link->eh_info));
        }

        ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
        ap->pflags &= ~ATA_PFLAG_EH_PENDING;
        ap->excl_link = NULL;    /* don't maintain exclusion over EH */

        spin_unlock_irqrestore(ap->lock, flags);

        /* invoke EH, skip if unloading or suspended */
        if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
            ap->ops->error_handler(ap);

        else
            ata_eh_finish(ap);

        /* process port suspend request */
        ata_eh_handle_port_suspend(ap);

        /* Exception might have happened after ->error_handler
         * recovered the port but before this point.  Repeat
         * EH in such case.
         */
        spin_lock_irqsave(ap->lock, flags);

        if (ap->pflags & ATA_PFLAG_EH_PENDING) {
            if (--ap->eh_tries) {
                spin_unlock_irqrestore(ap->lock, flags);
                goto repeat;
            }
            ata_port_printk(ap, KERN_ERR, "EH pending after %d "
                    "tries, giving up\n", ATA_EH_MAX_TRIES);
            ap->pflags &= ~ATA_PFLAG_EH_PENDING;
        }

        /* this run is complete, make sure EH info is clear */
        __ata_port_for_each_link(link, ap)
            memset(&link->eh_info, 0, sizeof(link->eh_info));

        /* Clear host_eh_scheduled while holding ap->lock such
         * that if exception occurs after this point but
         * before EH completion, SCSI midlayer will
         * re-initiate EH.
         */
        host->host_eh_scheduled = 0;

        spin_unlock_irqrestore(ap->lock, flags);
    } else {
        WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
        ap->ops->eng_timeout(ap);
    }

    /* finish or retry handled scmd's and clean up */
    WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

    scsi_eh_flush_done_q(&ap->eh_done_q);

    /* clean up */
    spin_lock_irqsave(ap->lock, flags);

    if (ap->pflags & ATA_PFLAG_LOADING)
        ap->pflags &= ~ATA_PFLAG_LOADING;
    else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
        queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);

    if (ap->pflags & ATA_PFLAG_RECOVERED)
        ata_port_printk(ap, KERN_INFO, "EH complete\n");

    ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

    /* tell wait_eh that we're done */
    ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
    wake_up_all(&ap->eh_wait_q);

    spin_unlock_irqrestore(ap->lock, flags);

    DPRINTK("EXIT\n");
}
Note that near the end of this function, wake_up_all(&ap->eh_wait_q) wakes up the main initialization thread that we earlier saw blocked waiting for EH. Once this function returns, the error-handler kernel thread has completed one full cycle and goes back to sleep until the next wakeup. Let's continue and see how, from inside this path, each SATA port gets reset and scanned.
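The other side of that handshake — the probing thread blocked inside ata_host_register() — sits in ata_port_wait_eh(). Roughly, from memory (so a reconstruction, not a verbatim quote of libata-eh.c), it sleeps on ap->eh_wait_q until both ATA_PFLAG_EH_PENDING and ATA_PFLAG_EH_IN_PROGRESS are clear, which is exactly the condition the code above establishes before calling wake_up_all():

/* Rough reconstruction of ata_port_wait_eh() in libata-eh.c */
void ata_port_wait_eh(struct ata_port *ap)
{
    unsigned long flags;
    DEFINE_WAIT(wait);

 retry:
    spin_lock_irqsave(ap->lock, flags);

    while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
        prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
        spin_unlock_irqrestore(ap->lock, flags);
        schedule();
        spin_lock_irqsave(ap->lock, flags);
    }
    finish_wait(&ap->eh_wait_q, &wait);

    spin_unlock_irqrestore(ap->lock, flags);

    /* make sure SCSI EH is also complete before returning */
    if (scsi_host_in_recovery(ap->scsi_host)) {
        msleep(10);
        goto retry;
    }
}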

linux/drivers/ata/sata_fsl.c

static const struct ata_port_info sata_fsl_port_info[] = {
    {
     .flags = SATA_FSL_HOST_FLAGS,
     .link_flags = SATA_FSL_HOST_LFLAGS,
     .pio_mask = 0x1f,    /* pio 0-4 */
     .udma_mask = 0x7f,    /* udma 0-6 */
     .port_ops = &sata_fsl_ops,
     },
};

static const struct ata_port_operations sata_fsl_ops = {
    .check_status = sata_fsl_check_status,
    .check_altstatus = sata_fsl_check_status,
    .dev_select = ata_noop_dev_select,

    .tf_read = sata_fsl_tf_read,

    .qc_prep = sata_fsl_qc_prep,
    .qc_issue = sata_fsl_qc_issue,
    .irq_clear = sata_fsl_irq_clear,

    .scr_read = sata_fsl_scr_read,
    .scr_write = sata_fsl_scr_write,

    .freeze = sata_fsl_freeze,
    .thaw = sata_fsl_thaw,
    .error_handler = sata_fsl_error_handler,
    .post_internal_cmd = sata_fsl_post_internal_cmd,

    .port_start = sata_fsl_port_start,
    .port_stop = sata_fsl_port_stop,

    .pmp_attach = sata_fsl_pmp_attach,
    .pmp_detach = sata_fsl_pmp_detach,
};

static void sata_fsl_error_handler(struct ata_port *ap)
{

    DPRINTK("in xx_error_handler\n");

    /* perform recovery */
    sata_pmp_do_eh(ap, ata_std_prereset, sata_fsl_softreset,
               sata_std_hardreset, ata_std_postreset,
               sata_pmp_std_prereset, sata_fsl_pmp_softreset,
               sata_pmp_std_hardreset, sata_pmp_std_postreset);

}
This function is quite simple: the driver just plugs its reset methods into the sata_pmp_do_eh() interface. Because the SATA controller in the MPC8315 CPU supports port multipliers (PM), the PMP helpers are used to handle error recovery here.
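For comparison (recalled from memory, not quoted verbatim from libata-eh.c): a libata driver without port-multiplier support would plug its reset methods into ata_do_eh() instead, which runs the same autopsy / report / recover / finish sequence but with a single set of reset methods and no PMP-specific handling:

void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
               ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
               ata_postreset_fn_t postreset)
{
    ata_eh_autopsy(ap);     /* analyse what went wrong on each link */
    ata_eh_report(ap);      /* log the diagnosis */
    ata_eh_recover(ap, prereset, softreset, hardreset, postreset, NULL);
    ata_eh_finish(ap);      /* complete or retry the failed commands */
}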

linux/drivers/ata/libata-pmp.c

/**
 *    sata_pmp_do_eh - do standard error handling for PMP-enabled host
 *    @ap: host port to handle error for
 *    @prereset: prereset method (can be NULL)
 *    @softreset: softreset method
 *    @hardreset: hardreset method
 *    @postreset: postreset method (can be NULL)
 *    @pmp_prereset: PMP prereset method (can be NULL)
 *    @pmp_softreset: PMP softreset method (can be NULL)
 *    @pmp_hardreset: PMP hardreset method (can be NULL)
 *    @pmp_postreset: PMP postreset method (can be NULL)
 *
 *    Perform standard error handling sequence for PMP-enabled host
 *    @ap.
 *
 *    LOCKING:
 *    Kernel thread context (may sleep).
 */
void sata_pmp_do_eh(struct ata_port *ap,
        ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
        ata_reset_fn_t hardreset, ata_postreset_fn_t postreset,
        ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset,
        ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset)
{
    DPRINTK("ENTER\n");
    ata_eh_autopsy(ap);
    ata_eh_report(ap);
    sata_pmp_eh_recover(ap, prereset, softreset, hardreset, postreset,
                pmp_prereset, pmp_softreset, pmp_hardreset,
                pmp_postreset);

    ata_eh_finish(ap);
    DPRINTK("EXIT\n");
}
From here on the remaining steps are straightforward and readers can follow the code on their own; we will focus on the function sata_pmp_eh_recover.


/**
 *    sata_pmp_eh_recover - recover PMP-enabled port
 *    @ap: ATA port to recover
 *    @prereset: prereset method (can be NULL)
 *    @softreset: softreset method
 *    @hardreset: hardreset method
 *    @postreset: postreset method (can be NULL)
 *    @pmp_prereset: PMP prereset method (can be NULL)
 *    @pmp_softreset: PMP softreset method (can be NULL)
 *    @pmp_hardreset: PMP hardreset method (can be NULL)
 *    @pmp_postreset: PMP postreset method (can be NULL)
 *
 *    Drive EH recovery operation for PMP enabled port @ap.  This
 *    function recovers host and PMP ports with proper retrials and
 *    fallbacks.  Actual recovery operations are performed using
 *    ata_eh_recover() and sata_pmp_eh_recover_pmp().
 *
 *    LOCKING:
 *    Kernel thread context (may sleep).
 *
 *    RETURNS:
 *    0 on success, -errno on failure.
 */
static int sata_pmp_eh_recover(struct ata_port *ap,
        ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
        ata_reset_fn_t hardreset, ata_postreset_fn_t postreset,
        ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset,
        ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset)
{
    int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
    struct ata_link *pmp_link = &ap->link;
    struct ata_device *pmp_dev = pmp_link->device;
    struct ata_eh_context *pmp_ehc = &pmp_link->eh_context;
    struct ata_link *link;
    struct ata_device *dev;
    unsigned int err_mask;
    u32 gscr_error, sntf;
    int cnt, rc;

    pmp_tries = ATA_EH_PMP_TRIES;
    ata_port_for_each_link(link, ap)
        link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES;

 retry:
    /* PMP attached? */
    if (!ap->nr_pmp_links) {
        rc = ata_eh_recover(ap, prereset, softreset, hardreset,
                    postreset, NULL);
        if (rc) {
            ata_link_for_each_dev(dev, &ap->link)
                ata_dev_disable(dev);
            return rc;
        }

        if (pmp_dev->class != ATA_DEV_PMP)
            return 0;

        /* new PMP online */
        ata_port_for_each_link(link, ap)
            link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES;

        /* fall through */
    }

    /* recover pmp */
    rc = sata_pmp_eh_recover_pmp(ap, prereset, softreset, hardreset,
                     postreset);
    if (rc)
        goto pmp_fail;

    /* handle disabled links */
    rc = sata_pmp_eh_handle_disabled_links(ap);
    if (rc)
        goto pmp_fail;

    /* recover links */
    rc = ata_eh_recover(ap, pmp_prereset, pmp_softreset, pmp_hardreset,
                pmp_postreset, &link);

    if (rc)
        goto link_fail;

    /* Connection status might have changed while resetting other
     * links, check SATA_PMP_GSCR_ERROR before returning.
     */

    /* clear SNotification */
    rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
    if (rc == 0)
        sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

    /* enable notification */
    if (pmp_dev->flags & ATA_DFLAG_AN) {
        pmp_dev->gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY;

        err_mask = sata_pmp_write(pmp_dev->link, SATA_PMP_GSCR_FEAT_EN,
                      pmp_dev->gscr[SATA_PMP_GSCR_FEAT_EN]);
        if (err_mask) {
            ata_dev_printk(pmp_dev, KERN_ERR, "failed to write "
                       "PMP_FEAT_EN (Emask=0x%x)\n", err_mask);
            rc = -EIO;
            goto pmp_fail;
        }
    }

    /* check GSCR_ERROR */
    err_mask = sata_pmp_read(pmp_link, SATA_PMP_GSCR_ERROR, &gscr_error);
    if (err_mask) {
        ata_dev_printk(pmp_dev, KERN_ERR, "failed to read "
                   "PMP_GSCR_ERROR (Emask=0x%x)\n", err_mask);
        rc = -EIO;
        goto pmp_fail;
    }

    cnt = 0;
    ata_port_for_each_link(link, ap) {
        if (!(gscr_error & (1 << link->pmp)))
            continue;

        if (sata_pmp_handle_link_fail(link, link_tries)) {
            ata_ehi_hotplugged(&link->eh_context.i);
            cnt++;
        } else {
            ata_link_printk(link, KERN_WARNING,
                "PHY status changed but maxed out on retries, "
                "giving up\n");
            ata_link_printk(link, KERN_WARNING,
                "Manully issue scan to resume this link\n");
        }
    }

    if (cnt) {
        ata_port_printk(ap, KERN_INFO, "PMP SError.N set for some "
                "ports, repeating recovery\n");
        goto retry;
    }

    return 0;

 link_fail:
    if (sata_pmp_handle_link_fail(link, link_tries)) {
        pmp_ehc->i.action |= ATA_EH_HARDRESET;
        goto retry;
    }

    /* fall through */
 pmp_fail:
    /* Control always ends up here after detaching PMP.  Shut up
     * and return if we're unloading.
     */
    if (ap->pflags & ATA_PFLAG_UNLOADING)
        return rc;

    if (!ap->nr_pmp_links)
        goto retry;

    if (--pmp_tries) {
        ata_port_printk(ap, KERN_WARNING,
                "failed to recover PMP, retrying in 5 secs\n");
        pmp_ehc->i.action |= ATA_EH_HARDRESET;
        ssleep(5);
        goto retry;
    }

    ata_port_printk(ap, KERN_ERR,
            "failed to recover PMP after %d tries, giving up\n",
            ATA_EH_PMP_TRIES);
    sata_pmp_detach(pmp_dev);
    ata_dev_disable(pmp_dev);

    return rc;
}

linux/drivers/ata/libata-eh.c
/**
 *    ata_eh_recover - recover host port after error
 *    @ap: host port to recover
 *    @prereset: prereset method (can be NULL)
 *    @softreset: softreset method (can be NULL)
 *    @hardreset: hardreset method (can be NULL)
 *    @postreset: postreset method (can be NULL)
 *    @r_failed_link: out parameter for failed link
 *
 *    This is the alpha and omega, eum and yang, heart and soul of
 *    libata exception handling.  On entry, actions required to
 *    recover each link and hotplug requests are recorded in the
 *    link's eh_context.  This function executes all the operations
 *    with appropriate retrials and fallbacks to resurrect failed
 *    devices, detach goners and greet newcomers.
 *
 *    LOCKING:
 *    Kernel thread context (may sleep).
 *
 *    RETURNS:
 *    0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
           ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
           ata_postreset_fn_t postreset,
           struct ata_link **r_failed_link)
{
    struct ata_link *link;
    struct ata_device *dev;
    int nr_failed_devs, nr_disabled_devs;
    int reset, rc;
    unsigned long flags;

    DPRINTK("ENTER\n");

    /* prep for recovery */
    ata_port_for_each_link(link, ap) {
        struct ata_eh_context *ehc = &link->eh_context;

        /* re-enable link? */
        if (ehc->i.action & ATA_EH_ENABLE_LINK) {
            ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
            spin_lock_irqsave(ap->lock, flags);
            link->flags &= ~ATA_LFLAG_DISABLED;
            spin_unlock_irqrestore(ap->lock, flags);
            ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
        }

        ata_link_for_each_dev(dev, link) {
            if (link->flags & ATA_LFLAG_NO_RETRY)
                ehc->tries[dev->devno] = 1;
            else
                ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

            /* collect port action mask recorded in dev actions */
            ehc->i.action |= ehc->i.dev_action[dev->devno] &
                     ~ATA_EH_PERDEV_MASK;
            ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

            /* process hotplug request */
            if (dev->flags & ATA_DFLAG_DETACH)
                ata_eh_detach_dev(dev);

            if (!ata_dev_enabled(dev) &&
                ((ehc->i.probe_mask & (1 << dev->devno)) &&
                 !(ehc->did_probe_mask & (1 << dev->devno)))) {
                ata_eh_detach_dev(dev);
                ata_dev_init(dev);
                ehc->did_probe_mask |= (1 << dev->devno);
                ehc->i.action |= ATA_EH_SOFTRESET;
            }
        }
    }

 retry:
    rc = 0;
    nr_failed_devs = 0;
    nr_disabled_devs = 0;
    reset = 0;

    /* if UNLOADING, finish immediately */
    if (ap->pflags & ATA_PFLAG_UNLOADING)
        goto out;

    /* prep for EH */
    ata_port_for_each_link(link, ap) {
        struct ata_eh_context *ehc = &link->eh_context;

        /* skip EH if possible. */
        if (ata_eh_skip_recovery(link))
            ehc->i.action = 0;

        /* do we need to reset? */
        if (ehc->i.action & ATA_EH_RESET_MASK)
            reset = 1;

        ata_link_for_each_dev(dev, link)
            ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
    }

    /* reset */
    if (reset) {
        /* if PMP is attached, this function only deals with
         * downstream links, port should stay thawed.
         */
        if (!ap->nr_pmp_links)
            ata_eh_freeze_port(ap);

        ata_port_for_each_link(link, ap) {
            struct ata_eh_context *ehc = &link->eh_context;

            if (!(ehc->i.action & ATA_EH_RESET_MASK))
                continue;

            rc = ata_eh_reset(link, ata_link_nr_vacant(link),
                      prereset, softreset, hardreset,
                      postreset);

            if (rc) {
                ata_link_printk(link, KERN_ERR,
                        "reset failed, giving up\n");
                goto out;
            }
        }

        if (!ap->nr_pmp_links)
            ata_eh_thaw_port(ap);
    }

    /* the rest */
    ata_port_for_each_link(link, ap) {
        struct ata_eh_context *ehc = &link->eh_context;

        /* revalidate existing devices and attach new ones */
        rc = ata_eh_revalidate_and_attach(link, &dev);
        if (rc)
            goto dev_fail;

        /* if PMP got attached, return, pmp EH will take care of it */
        if (link->device->class == ATA_DEV_PMP) {
            ehc->i.action = 0;
            return 0;
        }

        /* configure transfer mode if necessary */
        if (ehc->i.flags & ATA_EHI_SETMODE) {
            rc = ata_set_mode(link, &dev);
            if (rc)
                goto dev_fail;
            ehc->i.flags &= ~ATA_EHI_SETMODE;
        }

        if (ehc->i.action & ATA_EHI_LPM)
            ata_link_for_each_dev(dev, link)
                ata_dev_enable_pm(dev, ap->pm_policy);

        /* this link is okay now */
        ehc->i.flags = 0;
        continue;

dev_fail:
        nr_failed_devs++;
        if (ata_eh_handle_dev_fail(dev, rc))
            nr_disabled_devs++;

        if (ap->pflags & ATA_PFLAG_FROZEN) {
            /* PMP reset requires working host port.
             * Can't retry if it's frozen.
             */
            if (ap->nr_pmp_links)
                goto out;
            break;
        }
    }

    if (nr_failed_devs) {
        if (nr_failed_devs != nr_disabled_devs) {
            ata_port_printk(ap, KERN_WARNING, "failed to recover "
                    "some devices, retrying in 5 secs\n");
            ssleep(5);
        } else {
            /* no device left to recover, repeat fast */
            msleep(500);
        }

        goto retry;
    }

 out:
    if (rc && r_failed_link)
        *r_failed_link = link;

    DPRINTK("EXIT, rc=%d\n", rc);
    return rc;
}

int ata_eh_reset(struct ata_link *link, int classify,
         ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
         ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
    const int max_tries = ARRAY_SIZE(ata_eh_reset_timeouts);
    struct ata_port *ap = link->ap;
    struct ata_eh_context *ehc = &link->eh_context;
    unsigned int *classes = ehc->classes;
    unsigned int lflags = link->flags;
    int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
    int try = 0;
    struct ata_device *dev;
    unsigned long deadline, now;
    unsigned int tmp_action;
    ata_reset_fn_t reset;
    unsigned long flags;
    u32 sstatus;
    int rc;

    /* about to reset */
    spin_lock_irqsave(ap->lock, flags);
    ap->pflags |= ATA_PFLAG_RESETTING;
    spin_unlock_irqrestore(ap->lock, flags);

    ata_eh_about_to_do(link, NULL, ehc->i.action & ATA_EH_RESET_MASK);

    ata_link_for_each_dev(dev, link) {
        /* If we issue an SRST then an ATA drive (not ATAPI)
         * may change configuration and be in PIO0 timing. If
         * we do a hard reset (or are coming from power on)
         * this is true for ATA or ATAPI. Until we've set a
         * suitable controller mode we should not touch the
         * bus as we may be talking too fast.
         */
        dev->pio_mode = XFER_PIO_0;

        /* If the controller has a pio mode setup function
         * then use it to set the chipset to rights. Don't
         * touch the DMA setup as that will be dealt with when
         * configuring devices.
         */
        if (ap->ops->set_piomode)
            ap->ops->set_piomode(ap, dev);
    }

    /* Determine which reset to use and record in ehc->i.action.
     * prereset() may examine and modify it.
     */
    if (softreset && (!hardreset || (!(lflags & ATA_LFLAG_NO_SRST) &&
                     !sata_set_spd_needed(link) &&
                     !(ehc->i.action & ATA_EH_HARDRESET))))
        tmp_action = ATA_EH_SOFTRESET;
    else
        tmp_action = ATA_EH_HARDRESET;

    ehc->i.action = (ehc->i.action & ~ATA_EH_RESET_MASK) | tmp_action;

    if (prereset) {
        rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT);
        if (rc) {
            if (rc == -ENOENT) {
                ata_link_printk(link, KERN_DEBUG,
                        "port disabled. ignoring.\n");
                ehc->i.action &= ~ATA_EH_RESET_MASK;

                ata_link_for_each_dev(dev, link)
                    classes[dev->devno] = ATA_DEV_NONE;

                rc = 0;
            } else
                ata_link_printk(link, KERN_ERR,
                    "prereset failed (errno=%d)\n", rc);
            goto out;
        }
    }

    /* prereset() might have modified ehc->i.action */
    if (ehc->i.action & ATA_EH_HARDRESET)
        reset = hardreset;
    else if (ehc->i.action & ATA_EH_SOFTRESET)
        reset = softreset;
    else {
        /* prereset told us not to reset, bang classes and return */
        ata_link_for_each_dev(dev, link)
            classes[dev->devno] = ATA_DEV_NONE;
        rc = 0;
        goto out;
    }

    /* did prereset() screw up?  if so, fix up to avoid oopsing */
    if (!reset) {
        if (softreset)
            reset = softreset;
        else
            reset = hardreset;
    }

 retry:
    deadline = jiffies + ata_eh_reset_timeouts[try++];

    /* shut up during boot probing */
    if (verbose)
        ata_link_printk(link, KERN_INFO, "%s resetting link\n",
                reset == softreset ? "soft" : "hard");

    /* mark that this EH session started with reset */
    if (reset == hardreset)
        ehc->i.flags |= ATA_EHI_DID_HARDRESET;
    else
        ehc->i.flags |= ATA_EHI_DID_SOFTRESET;

    rc = ata_do_reset(link, reset, classes, deadline);

    if (reset == hardreset &&
        ata_eh_followup_srst_needed(link, rc, classify, classes)) {
        /* okay, let's do follow-up softreset */
        reset = softreset;

        if (!reset) {
            ata_link_printk(link, KERN_ERR,
                    "follow-up softreset required "
                    "but no softreset avaliable\n");
            rc = -EINVAL;
            goto fail;
        }

        ata_eh_about_to_do(link, NULL, ATA_EH_RESET_MASK);
        rc = ata_do_reset(link, reset, classes, deadline);
    }


    /* -EAGAIN can happen if we skipped followup SRST */
    if (rc && rc != -EAGAIN)
        goto fail;

    /* was classification successful? */
    if (classify && classes[0] == ATA_DEV_UNKNOWN &&
        !(lflags & ATA_LFLAG_ASSUME_CLASS)) {
        if (try < max_tries) {
            ata_link_printk(link, KERN_WARNING,
                    "classification failed\n");
            rc = -EINVAL;
            goto fail;
        }

        ata_link_printk(link, KERN_WARNING,
                "classfication failed, assuming ATA\n");
        lflags |= ATA_LFLAG_ASSUME_ATA;
    }

    ata_link_for_each_dev(dev, link) {
        /* After the reset, the device state is PIO 0 and the
         * controller state is undefined.  Reset also wakes up
         * drives from sleeping mode.
         */
        dev->pio_mode = XFER_PIO_0;
        dev->flags &= ~ATA_DFLAG_SLEEPING;

        if (ata_link_offline(link))
            continue;

        /* apply class override */
        if (lflags & ATA_LFLAG_ASSUME_ATA)
            classes[dev->devno] = ATA_DEV_ATA;
        else if (lflags & ATA_LFLAG_ASSUME_SEMB)
            classes[dev->devno] = ATA_DEV_SEMB_UNSUP; /* not yet */
    }

    /* record current link speed */
    if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
        link->sata_spd = (sstatus >> 4) & 0xf;

    if (postreset)
        postreset(link, classes);

    /* reset successful, schedule revalidation */
    ata_eh_done(link, NULL, ehc->i.action & ATA_EH_RESET_MASK);
    ehc->i.action |= ATA_EH_REVALIDATE;

    rc = 0;
 out:
    /* clear hotplug flag */
    ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;

    spin_lock_irqsave(ap->lock, flags);
    ap->pflags &= ~ATA_PFLAG_RESETTING;
    spin_unlock_irqrestore(ap->lock, flags);

    return rc;

 fail:
    if (rc == -ERESTART || try >= max_tries)
        goto out;

    now = jiffies;
    if (time_before(now, deadline)) {
        unsigned long delta = deadline - now;

        ata_link_printk(link, KERN_WARNING, "reset failed "
                "(errno=%d), retrying in %u secs\n",
                rc, (jiffies_to_msecs(delta) + 999) / 1000);

        while (delta)
            delta = schedule_timeout_uninterruptible(delta);
    }

    if (rc == -EPIPE || try == max_tries - 1)
        sata_down_spd_limit(link);
    if (hardreset)
        reset = hardreset;
    goto retry;
}

This function is fairly involved, and space does not allow a step-by-step walkthrough here, so readers are encouraged to work through it on their own. One piece worth calling out is the retry schedule it uses, sketched below.
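The deadline picked at the retry: label comes from the ata_eh_reset_timeouts[] table, and max_tries is simply its size. From memory (the exact values differ between kernel versions, so treat this as an approximation), the table looks roughly like this, giving the drive progressively longer to come back before the port gives up:

static const unsigned long ata_eh_reset_timeouts[] = {
    10 * HZ,    /* most drives spin up within 10 seconds */
    10 * HZ,    /* nearly all working drives spin up before 20 seconds */
    35 * HZ,    /* allow a long quiet period for very slow devices */
    5 * HZ,     /* one last quick attempt before giving up */
};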