/* phy_polling_mode — see the static inline helper defined later in this excerpt. */

/**
 * phy_device_create - allocate and initialize a struct phy_device
 * @bus: the target MII bus the PHY sits on
 * @addr: the PHY's address on @bus
 * @phy_id: the (clause-22) PHY identifier
 * @is_c45: true if this is a clause-45 PHY
 * @c45_ids: clause-45 per-MMD device IDs, or NULL
 *
 * Allocates the phy_device, fills in default link parameters, registers the
 * embedded mdio_device with the driver model, and requests the matching
 * driver module(s).
 *
 * Returns the new phy_device on success, or an ERR_PTR() on failure.
 * Ownership: the caller holds the initial device reference; dropping it
 * (put_device) releases the allocation via phy_mdio_device_free.
 */
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
                     bool is_c45,
                     struct phy_c45_device_ids *c45_ids)
{
    struct phy_device *dev;
    struct mdio_device *mdiodev;
    int ret = 0;

    /* We allocate the device, and initialize the default values */
    dev = kzalloc(sizeof(*dev), GFP_KERNEL);
    if (!dev)
        return ERR_PTR(-ENOMEM);

    /* Wire the embedded mdio_device into the MDIO bus/device model. */
    mdiodev = &dev->mdio;
    mdiodev->dev.parent = &bus->dev;
    mdiodev->dev.bus = &mdio_bus_type;
    mdiodev->dev.type = &mdio_bus_phy_type;
    mdiodev->bus = bus;
    mdiodev->bus_match = phy_bus_match;
    mdiodev->addr = addr;
    mdiodev->flags = MDIO_DEVICE_FLAG_PHY;
    mdiodev->device_free = phy_mdio_device_free;
    mdiodev->device_remove = phy_mdio_device_remove;

    /* Link state is unknown until the first read of the PHY. */
    dev->speed = SPEED_UNKNOWN;
    dev->duplex = DUPLEX_UNKNOWN;
    dev->pause = 0;
    dev->asym_pause = 0;
    dev->link = 0;
    dev->interface = PHY_INTERFACE_MODE_GMII;

    dev->autoneg = AUTONEG_ENABLE;

    dev->is_c45 = is_c45;
    dev->phy_id = phy_id;
    if (c45_ids)
        dev->c45_ids = *c45_ids;
    /* IRQ line (or PHY_POLL) comes from the per-address bus table. */
    dev->irq = bus->irq[addr];

    dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr);
    /* device_initialize() takes the initial reference; from here on,
     * errors must be unwound with put_device(), not kfree().
     */
    device_initialize(&mdiodev->dev);

    dev->state = PHY_DOWN;

    mutex_init(&dev->lock);
    INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);

    /* Request the appropriate module unconditionally; don't
     * bother trying to do so only if it isn't already loaded,
     * because that gets complicated. A hotplug event would have
     * done an unconditional modprobe anyway.
     * We don't do normal hotplug because it won't work for MDIO
     * -- because it relies on the device staying around for long
     * enough for the driver to get loaded. With MDIO, the NIC
     * driver will get bored and give up as soon as it finds that
     * there's no driver _already_ loaded.
     */
    if (is_c45 && c45_ids) {
        const int num_ids = ARRAY_SIZE(c45_ids->device_ids);
        int i;

        /* NOTE(review): loop starts at 1, skipping MMD 0 — presumably
         * index 0 is not a valid clause-45 device address; confirm.
         */
        for (i = 1; i < num_ids; i++) {
            /* All-ones ID reads typically mean "no device present
             * at this MMD" — skip it.  NOTE(review): confirm.
             */
            if (c45_ids->device_ids[i] == 0xffffffff)
                continue;

            ret = phy_request_driver_module(dev,
                        c45_ids->device_ids[i]);
            if (ret)
                break;
        }
    } else {
        ret = phy_request_driver_module(dev, phy_id);
    }

    if (ret) {
        /* Drops the reference from device_initialize(); this frees
         * dev through the registered phy_mdio_device_free hook.
         */
        put_device(&mdiodev->dev);
        dev = ERR_PTR(ret);
    }

    return dev;
}
EXPORT_SYMBOL(phy_device_create);

/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 *
 * Runs one step of the PHY state machine under phydev->lock, performs any
 * deferred actions (autonegotiation start / suspend) after dropping the
 * lock, and re-queues itself while the PHY is started and in polling mode.
 */
void phy_state_machine(struct work_struct *work)
{
    struct delayed_work *dwork = to_delayed_work(work);
    /* Recover the owning phy_device from the embedded delayed_work. */
    struct phy_device *phydev =
            container_of(dwork, struct phy_device, state_queue);
    bool needs_aneg = false, do_suspend = false;
    enum phy_state old_state;
    int err = 0;

    mutex_lock(&phydev->lock);

    old_state = phydev->state;

    /* Decide what to do under the lock; the actual aneg/suspend calls
     * are deferred until after unlock (they presumably take the lock
     * themselves — NOTE(review): confirm).
     */
    switch (phydev->state) {
    case PHY_DOWN:
    case PHY_READY:
        break;
    case PHY_UP:
        needs_aneg = true;

        break;
    case PHY_NOLINK:
    case PHY_RUNNING:
        err = phy_check_link_status(phydev);
        break;
    case PHY_HALTED:
        /* Force link down and notify before suspending. */
        if (phydev->link) {
            phydev->link = 0;
            phy_link_down(phydev, true);
        }
        do_suspend = true;
        break;
    }

    mutex_unlock(&phydev->lock);

    if (needs_aneg)
        err = phy_start_aneg(phydev);
    else if (do_suspend)
        phy_suspend(phydev);

    if (err < 0)
        phy_error(phydev);

    /* NOTE(review): phydev->state is read here without holding the lock;
     * the deferred calls above may have changed it since old_state was
     * sampled — confirm this race is acceptable/intended upstream.
     */
    if (old_state != phydev->state) {
        phydev_dbg(phydev, "PHY state change %s -> %s\n",
               phy_state_to_str(old_state),
               phy_state_to_str(phydev->state));
        if (phydev->drv && phydev->drv->link_change_notify)
            phydev->drv->link_change_notify(phydev);
    }

    /* Only re-schedule a PHY state machine change if we are polling the
     * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
     * between states from phy_mac_interrupt().
     *
     * In state PHY_HALTED the PHY gets suspended, so rescheduling the
     * state machine would be pointless and possibly error prone when
     * called from phy_disconnect() synchronously.
     */
    mutex_lock(&phydev->lock);
    if (phy_polling_mode(phydev) && phy_is_started(phydev))
        phy_queue_state_machine(phydev, PHY_STATE_TIME);
    mutex_unlock(&phydev->lock);
}

/**
 * phy_polling_mode - Convenience function for testing whether polling is
 * used to detect PHY status changes
 * @phydev: the phy_device struct
 *
 * Returns true when the PHY has no interrupt line and its status must be
 * polled (irq configured as PHY_POLL).
 */
static inline bool phy_polling_mode(struct phy_device *phydev)
{
    bool uses_polling = (phydev->irq == PHY_POLL);

    return uses_polling;
}

/**
 * phy_queue_state_machine - (re)schedule the PHY state machine work
 * @phydev: the phy_device struct
 * @jiffies: delay, in jiffies, before the state machine runs
 *
 * Uses mod_delayed_work() so an already-pending run is re-timed rather
 * than queued twice.
 */
void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
{
    struct delayed_work *machine_work = &phydev->state_queue;

    mod_delayed_work(system_power_efficient_wq, machine_work, jiffies);
}

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
    mutex_lock(&phydev->lock);

    /* Starting is only legal from PHY_READY (never started) or
     * PHY_HALTED (stopped); warn and bail out from any other state.
     */
    switch (phydev->state) {
    case PHY_READY:
    case PHY_HALTED:
        break;
    default:
        WARN(1, "called from state %s\n",
             phy_state_to_str(phydev->state));
        goto out;
    }

    /* if phy was suspended, bring the physical link up again */
    __phy_resume(phydev);

    phydev->state = PHY_UP;

    phy_start_machine(phydev);
out:
    mutex_unlock(&phydev->lock);
}

/**
 * phy_start_machine - kick off PHY state machine tracking
 * @phydev: the phy_device struct
 *
 * Queues an immediate run of the state machine work.
 */
void phy_start_machine(struct phy_device *phydev)
{
    phy_trigger_machine(phydev);
}

/* Schedule the state machine to run as soon as possible (zero delay). */
static void phy_trigger_machine(struct phy_device *phydev)
{
    unsigned long delay = 0;

    phy_queue_state_machine(phydev, delay);
}

/* NOTE(review): this is an exact duplicate of the phy_queue_state_machine
 * definition earlier in this file — compiling both in one translation unit
 * is a redefinition error.  One copy should be removed; kept here verbatim
 * because this excerpt appears to merge code from two kernel source files.
 */
void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
{
    mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
             jiffies);
}

/* You may also be interested in: linux, driver, func */