 	if (!atomic_inc_and_test(&pt->pending))
 		set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
-	if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
-		vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+
+	if (vcpu0 && waitqueue_active(&vcpu0->wq))
 		wake_up_interruptible(&vcpu0->wq);
-	}
-	pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period);
-	pt->scheduled = ktime_to_ns(pt->timer.expires);
+	hrtimer_add_expires_ns(&pt->timer, pt->period);
-	pt->scheduled = ktime_to_ns(hrtimer_get_expires(&pt->timer));
++	pt->scheduled = hrtimer_get_expires_ns(&pt->timer);
 	if (pt->period)
-		ps->channels[0].count_load_time = pt->timer.expires;
++		ps->channels[0].count_load_time = hrtimer_get_expires(&pt->timer);
 	return (pt->period == 0 ? 0 : 1);
 }
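The hunk above combines two independent changes to the PIT timer callback in arch/x86/kvm/i8254.c: the wakeup path stops setting mp_state directly, and the open-coded `pt->timer.expires` arithmetic moves to the hrtimer accessor helpers. The doubled `++` markers are combined-diff notation for lines present in neither parent, i.e. the hand-edited merge resolution. As a reading aid, here is a sketch of how the callback should read with both changes applied; the function signature and local declarations are assumed from the i8254.c of that era rather than quoted from this diff:

	static int __pit_timer_fn(struct kvm_kpit_state *ps)
	{
		struct kvm_vcpu *vcpu0 = ps->pit->kvm->vcpus[0];
		struct kvm_kpit_timer *pt = &ps->pit_timer;

		/* Record the tick and flag the vcpu if ticks are outstanding. */
		if (!atomic_inc_and_test(&pt->pending))
			set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);

		if (vcpu0 && waitqueue_active(&vcpu0->wq))
			wake_up_interruptible(&vcpu0->wq);

		/* Advance the expiry by one period through the new accessors. */
		hrtimer_add_expires_ns(&pt->timer, pt->period);
		pt->scheduled = hrtimer_get_expires_ns(&pt->timer);
		if (pt->period)
			ps->channels[0].count_load_time = hrtimer_get_expires(&pt->timer);

		/* A non-zero period means periodic mode: tell the caller to rearm. */
		return (pt->period == 0 ? 0 : 1);
	}

The next hunk makes the matching change to the local APIC timer callback.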
 	if(!atomic_inc_and_test(&apic->timer.pending))
 		set_bit(KVM_REQ_PENDING_TIMER, &apic->vcpu->requests);
-	if (waitqueue_active(q)) {
-		apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+	if (waitqueue_active(q))
 		wake_up_interruptible(q);
-	}
+
 	if (apic_lvtt_period(apic)) {
 		result = 1;
-		apic->timer.dev.expires = ktime_add_ns(
-			apic->timer.dev.expires,
-			apic->timer.period);
+		hrtimer_add_expires_ns(&apic->timer.dev, apic->timer.period);
 	}
 	return result;
 }
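After the merge, the APIC callback should read roughly as below; the enclosing function and the declarations of `result` and `q` are assumptions based on the arch/x86/kvm/lapic.c of that era, not part of this diff:

	static int __apic_timer_fn(struct kvm_lapic *apic)
	{
		int result = 0;
		wait_queue_head_t *q = &apic->vcpu->wq;

		if (!atomic_inc_and_test(&apic->timer.pending))
			set_bit(KVM_REQ_PENDING_TIMER, &apic->vcpu->requests);
		if (waitqueue_active(q))
			wake_up_interruptible(q);

		if (apic_lvtt_period(apic)) {
			/* Periodic LVTT: push the expiry forward and ask to rearm. */
			result = 1;
			hrtimer_add_expires_ns(&apic->timer.dev, apic->timer.period);
		}
		return result;
	}

The final hunk applies the same expires conversion to the per-cpu tick timer set up in kernel/time/tick-sched.c. The cb_mode change beside it comes from the separate rename of HRTIMER_CB_IRQSAFE_NO_SOFTIRQ to HRTIMER_CB_IRQSAFE_PERCPU, which marks per-cpu timers so they are not migrated off their CPU.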
 	 */
 	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ts->sched_timer.function = tick_sched_timer;
-	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 	/* Get the next period (per cpu) */
-	ts->sched_timer.expires = tick_init_jiffy_update();
+	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 	offset = ktime_to_ns(tick_period) >> 1;
 	do_div(offset, num_possible_cpus());
 	offset *= smp_processor_id();
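Every hunk in this diff replaces a direct read or write of `hrtimer.expires` with an accessor. A minimal sketch of what those accessors do, assuming the `_expires` and `_softexpires` fields that the range-hrtimers work introduced in include/linux/hrtimer.h; the in-tree helpers are the authoritative definitions:

	/* Simplified sketches, not the exact in-tree definitions. */
	static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
	{
		timer->_expires = time;
		timer->_softexpires = time;	/* no slack: hard and soft expiry agree */
	}

	static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
	{
		timer->_expires = ktime_add_ns(timer->_expires, ns);
		timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
	}

	static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
	{
		return timer->_expires;
	}

	static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
	{
		return ktime_to_ns(timer->_expires);
	}

Funneling all expiry accesses through helpers is what allowed the soft-expiry field for range timers to be added without touching every caller again.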