/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/**
 * @file
 * @brief High-level clock interrupt handler.
 *
 * This file contains the clock() function which is the source
 * of preemption. It is also responsible for executing expired
 * timeouts.
 */
42
#include <time/clock.h>
43
#include <time/timeout.h>
45
#include <synch/spinlock.h>
46
#include <synch/waitq.h>
48
#include <proc/scheduler.h>
53
#include <proc/thread.h>
54
#include <sysinfo/sysinfo.h>
55
#include <arch/barrier.h>
59
/* Pointer to variable with uptime */
62
/** Physical memory area of the real time clock */
63
static parea_t clock_parea;
65
/* Variable holding fragment of second, so that we would update
68
static unative_t secfrag = 0;
70
/** Initialize realtime clock counter
72
* The applications (and sometimes kernel) need to access accurate
73
* information about realtime data. We allocate 1 page with these
74
* data and update it periodically.
76
void clock_counter_init(void)
80
faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
82
panic("Cannot allocate page for clock.");
84
uptime = (uptime_t *) PA2KA(faddr);
90
clock_parea.pbase = (uintptr_t) faddr;
91
clock_parea.frames = 1;
92
ddi_parea_register(&clock_parea);
95
* Prepare information for the userspace so that it can successfully
96
* physmem_map() the clock_parea.
98
sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
99
sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
103
/** Update public counters
105
* Update it only on first processor
106
* TODO: Do we really need so many write barriers?
108
static void clock_update_counters(void)
111
secfrag += 1000000 / HZ;
112
if (secfrag >= 1000000) {
116
uptime->useconds = secfrag;
118
uptime->seconds2 = uptime->seconds1;
120
uptime->useconds += 1000000 / HZ;
126
* Clock routine executed from clock interrupt handler
127
* (assuming interrupts_disable()'d). Runs expired timeouts
128
* and preemptive scheduling.
137
size_t missed_clock_ticks = CPU->missed_clock_ticks;
141
* To avoid lock ordering problems,
142
* run all expired timeouts as you visit them.
144
for (i = 0; i <= missed_clock_ticks; i++) {
145
clock_update_counters();
146
spinlock_lock(&CPU->timeoutlock);
147
while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
148
h = list_get_instance(l, timeout_t, link);
149
spinlock_lock(&h->lock);
150
if (h->ticks-- != 0) {
151
spinlock_unlock(&h->lock);
157
timeout_reinitialize(h);
158
spinlock_unlock(&h->lock);
159
spinlock_unlock(&CPU->timeoutlock);
163
spinlock_lock(&CPU->timeoutlock);
165
spinlock_unlock(&CPU->timeoutlock);
167
CPU->missed_clock_ticks = 0;
170
* Do CPU usage accounting and find out whether to preempt THREAD.
176
spinlock_lock(&CPU->lock);
177
CPU->needs_relink += 1 + missed_clock_ticks;
178
spinlock_unlock(&CPU->lock);
180
spinlock_lock(&THREAD->lock);
181
if ((ticks = THREAD->ticks)) {
182
if (ticks >= 1 + missed_clock_ticks)
183
THREAD->ticks -= 1 + missed_clock_ticks;
187
spinlock_unlock(&THREAD->lock);
189
if (!ticks && !PREEMPTION_DISABLED) {
196
* Give udebug chance to stop the thread
197
* before it begins executing userspace code.
199
istate = THREAD->udebug.uspace_state;
200
if (istate && istate_from_uspace(istate))
201
udebug_before_thread_runs();