nanosleep

How the nanosleep(2) system call is built on hrtimers in the Linux kernel (kernel/time/hrtimer.c). The call chain is sys_nanosleep -> hrtimer_nanosleep -> do_nanosleep, with hrtimer_wakeup() as the timer callback that wakes the sleeping task.

SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
        struct __kernel_timespec __user *, rmtp)
{
    struct timespec64 tu;

    if (get_timespec64(&tu, rqtp))
        return -EFAULT;

    if (!timespec64_valid(&tu))
        return -EINVAL;

    /* Record how unslept time should be reported if a signal interrupts
     * the sleep: as a native timespec at rmtp, or not at all. */
    current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
    current->restart_block.nanosleep.rmtp = rmtp;
    return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
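
From user space this is the familiar nanosleep(2) API: on EINTR the kernel writes the unslept time back through rmtp, which is exactly what the restart_block bookkeeping above prepares for. A minimal userspace sketch of the usual retry loop (the helper name sleep_full is made up for illustration):

#include <errno.h>
#include <time.h>

/* Hypothetical helper: sleep the full interval even across signals,
 * resuming with the remainder the kernel wrote back through rmtp. */
static int sleep_full(struct timespec req)
{
    struct timespec rem;

    while (nanosleep(&req, &rem) == -1) {
        if (errno != EINTR)
            return -1;  /* EFAULT/EINVAL from the checks above */
        req = rem;      /* retry with the unslept remainder */
    }
    return 0;
}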

long hrtimer_nanosleep(const struct timespec64 *rqtp,
               const enum hrtimer_mode mode, const clockid_t clockid)
{
    struct restart_block *restart;
    struct hrtimer_sleeper t;
    int ret = 0;
    u64 slack;

    /* Allow the expiry to be coalesced with nearby timers via the
     * task's timer slack; deadline and real-time tasks get none. */
    slack = current->timer_slack_ns;
    if (dl_task(current) || rt_task(current))
        slack = 0;

    hrtimer_init_sleeper_on_stack(&t, clockid, mode);
    hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack);
    ret = do_nanosleep(&t, mode);
    if (ret != -ERESTART_RESTARTBLOCK)
        goto out;

    /* Absolute timers do not update the rmtp value and restart: */
    if (mode == HRTIMER_MODE_ABS) {
        ret = -ERESTARTNOHAND;
        goto out;
    }

    restart = &current->restart_block;
    restart->nanosleep.clockid = t.timer.base->clockid;
    restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
    set_restart_fn(restart, hrtimer_nanosleep_restart);
out:
    destroy_hrtimer_on_stack(&t.timer);
    return ret;
}
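
The actual wait loop lives in do_nanosleep(), which the code above calls but does not show, and the restart function registered through set_restart_fn() simply re-enters that loop with an absolute expiry. The following is a sketch of both, reconstructed from the same kernel generation as the excerpt (around v5.4); details such as the freezer integration vary between versions:

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
    struct restart_block *restart;

    do {
        set_current_state(TASK_INTERRUPTIBLE);
        hrtimer_sleeper_start_expires(t, mode);

        /* hrtimer_wakeup() clears t->task when the timer fires. */
        if (likely(t->task))
            freezable_schedule();

        hrtimer_cancel(&t->timer);
        mode = HRTIMER_MODE_ABS;

    } while (t->task && !signal_pending(current));

    __set_current_state(TASK_RUNNING);

    if (!t->task)
        return 0;    /* timer expired: the sleep completed */

    /* Interrupted by a signal: report the remaining time (if requested)
     * and have the signal code restart the syscall. */
    restart = &current->restart_block;
    if (restart->nanosleep.type != TT_NONE) {
        ktime_t rem = hrtimer_expires_remaining(&t->timer);
        struct timespec64 rmt;

        if (rem <= 0)
            return 0;
        rmt = ktime_to_timespec64(rem);

        return nanosleep_copyout(restart, &rmt);
    }
    return -ERESTART_RESTARTBLOCK;
}

static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
    struct hrtimer_sleeper t;
    int ret;

    hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
                   HRTIMER_MODE_ABS);
    hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
    ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
    destroy_hrtimer_on_stack(&t.timer);
    return ret;
}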

void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
                   clockid_t clock_id, enum hrtimer_mode mode)
{
    debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
    __hrtimer_init_sleeper(sl, clock_id, mode);
}

static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
                   clockid_t clock_id, enum hrtimer_mode mode)
{
    /*
     * On PREEMPT_RT enabled kernels, hrtimers which are not explicitly
     * marked for hard interrupt expiry mode are moved into soft
     * interrupt context either for latency reasons or because the
     * hrtimer callback takes regular spinlocks or invokes other
     * functions which are not suitable for hard interrupt context on
     * PREEMPT_RT.
     *
     * The hrtimer_sleeper callback is RT compatible in hard interrupt
     * context, but there is a latency concern: Untrusted userspace can
     * spawn many threads which arm timers for the same expiry time on
     * the same CPU. That causes a latency spike due to the wakeup of
     * a gazillion threads.
     *
     * OTOH, privileged real-time user space applications rely on the
     * low latency of hard interrupt wakeups. If the current task is in
     * a real-time scheduling class, mark the mode for hard interrupt
     * expiry.
     */
    if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
        if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT))
            mode |= HRTIMER_MODE_HARD;
    }

    __hrtimer_init(&sl->timer, clock_id, mode);
    sl->timer.function = hrtimer_wakeup;
    sl->task = current;
}

static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
    struct hrtimer_sleeper *t =
        container_of(timer, struct hrtimer_sleeper, timer);
    struct task_struct *task = t->task;

    /* Clearing t->task tells the sleeping side that the timer fired;
     * do_nanosleep()'s wait loop exits once it observes the NULL pointer. */
    t->task = NULL;
    if (task)
        wake_up_process(task);

    return HRTIMER_NORESTART;
}
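
This sleeper/wakeup handshake is generic: the callback clears t->task, and the sleeping side reads a NULL task as "timer fired" and a non-NULL task as "woken early". The same pattern backs schedule_hrtimeout() and friends; a simplified, hypothetical sketch of it (error handling omitted):

int __sched schedule_hrtimeout_sketch(ktime_t expires, clockid_t clock_id)
{
    struct hrtimer_sleeper t;

    hrtimer_init_sleeper_on_stack(&t, clock_id, HRTIMER_MODE_ABS);
    hrtimer_set_expires(&t.timer, expires);

    set_current_state(TASK_INTERRUPTIBLE);
    hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_ABS);

    if (likely(t.task))
        schedule();

    hrtimer_cancel(&t.timer);
    __set_current_state(TASK_RUNNING);
    destroy_hrtimer_on_stack(&t.timer);

    /* NULL task means hrtimer_wakeup() ran: the timeout expired. */
    return !t.task ? 0 : -EINTR;
}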
