4
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
8
#include <linux/errno.h>
9
#include <linux/sched.h>
10
#include <linux/string.h>
12
#include <linux/smp.h>
13
#include <linux/smp_lock.h>
14
#include <linux/vmalloc.h>
15
#include <linux/slab.h>
17
#include <asm/uaccess.h>
18
#include <asm/system.h>
22
#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
/*
 * IPI handler: reload the LDT of the interrupted CPU's active mm.
 * The mm argument is unused; each CPU reloads from its own active_mm.
 */
static void flush_ldt(void *mm)
{
	if (current->active_mm)
		load_LDT(&current->active_mm->context);
}
#endif
30
/*
 * Grow (never shrink) the LDT backing store of @pc to hold at least
 * @mincount entries, rounded up to a multiple of 512.  When @reload is
 * set, the new LDT is also made visible to the hardware/hypervisor and
 * other CPUs running this mm are asked to reload it.
 *
 * Caller must hold pc->sem.  Returns 0 on success, -ENOMEM on failure.
 */
static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
{
	void *oldldt, *newldt;
	int oldsize;

	if (mincount <= pc->size)
		return 0;
	oldsize = pc->size;
	mincount = (mincount+511)&(~511);
	/* Small tables come from kmalloc; anything over a page from vmalloc. */
	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
	else
		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
	if (!newldt)
		return -ENOMEM;

	if (oldsize)
		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
	oldldt = pc->ldt;
	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0,
	       (mincount-oldsize)*LDT_ENTRY_SIZE);
	pc->ldt = newldt;
	pc->size = mincount;

	if (reload) {
		/*
		 * Xen: LDT pages must be mapped read-only before the
		 * hypervisor will accept them as a descriptor table.
		 */
		make_pages_readonly(
			pc->ldt,
			(pc->size*LDT_ENTRY_SIZE)/PAGE_SIZE);
		load_LDT(pc);
		flush_page_update_queue();
#ifdef CONFIG_SMP
		/* Other CPUs running this mm must reload the new LDT too. */
		if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
			smp_call_function(flush_ldt, 0, 1, 1);
#endif
	}
	if (oldsize) {
		/* Make the old table writeable again before freeing it. */
		make_pages_writeable(
			oldldt,
			(oldsize*LDT_ENTRY_SIZE)/PAGE_SIZE);
		flush_page_update_queue();
		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(oldldt);
		else
			kfree(oldldt);
	}
	return 0;
}
77
/*
 * Copy the LDT of @old into @new (used at fork).  Allocates backing
 * store sized to the parent's LDT, copies the entries, and marks the
 * new pages read-only for the hypervisor.  Returns 0 or a negative
 * errno from alloc_ldt().
 */
static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
{
	int err = alloc_ldt(new, old->size, 0);

	if (err < 0) {
		printk(KERN_WARNING "ldt allocation failed\n");
		return err;
	}
	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
	make_pages_readonly(new->ldt, (new->size*LDT_ENTRY_SIZE)/PAGE_SIZE);
	return 0;
}
91
* we do not have to muck with descriptors here, that is
92
* done in switch_mm() as needed.
94
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
96
struct mm_struct * old_mm;
99
init_MUTEX(&mm->context.sem);
100
mm->context.size = 0;
101
old_mm = current->mm;
102
if (old_mm && old_mm->context.size > 0) {
103
down(&old_mm->context.sem);
104
retval = copy_ldt(&mm->context, &old_mm->context);
105
up(&old_mm->context.sem);
111
* No need to lock the MM as we are the last user
112
* Do not touch the ldt register, we are already
113
* in the next thread.
115
void destroy_context(struct mm_struct *mm)
117
if (mm->context.size) {
118
make_pages_writeable(
120
(mm->context.size*LDT_ENTRY_SIZE)/PAGE_SIZE);
121
flush_page_update_queue();
122
if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
123
vfree(mm->context.ldt);
125
kfree(mm->context.ldt);
126
mm->context.size = 0;
130
static int read_ldt(void * ptr, unsigned long bytecount)
134
struct mm_struct * mm = current->mm;
136
if (!mm->context.size)
138
if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
139
bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
141
down(&mm->context.sem);
142
size = mm->context.size*LDT_ENTRY_SIZE;
143
if (size > bytecount)
147
if (copy_to_user(ptr, mm->context.ldt, size))
149
up(&mm->context.sem);
152
if (size != bytecount) {
153
/* zero-fill the rest */
154
clear_user(ptr+size, bytecount-size);
160
static int read_default_ldt(void * ptr, unsigned long bytecount)
167
address = &default_ldt[0];
168
size = 5*sizeof(struct desc_struct);
169
if (size > bytecount)
173
if (copy_to_user(ptr, address, size))
179
static int write_ldt(void * ptr, unsigned long bytecount, int oldmode)
181
struct mm_struct * mm = current->mm;
182
__u32 entry_1, entry_2, *lp;
183
unsigned long phys_lp, max_limit;
185
struct modify_ldt_ldt_s ldt_info;
188
if (bytecount != sizeof(ldt_info))
191
if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
195
if (ldt_info.entry_number >= LDT_ENTRIES)
197
if (ldt_info.contents == 3) {
200
if (ldt_info.seg_not_present == 0)
205
* This makes our tests for overlap with Xen space easier. There's no good
206
* reason to have a user segment starting this high anyway.
208
if (ldt_info.base_addr >= PAGE_OFFSET)
211
down(&mm->context.sem);
212
if (ldt_info.entry_number >= mm->context.size) {
213
error = alloc_ldt(¤t->mm->context, ldt_info.entry_number+1, 1);
219
lp = (__u32 *)((ldt_info.entry_number<<3) + (char *)mm->context.ldt);
220
phys_lp = arbitrary_virt_to_phys(lp);
222
/* Allow LDTs to be cleared by the user. */
223
if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
225
(ldt_info.contents == 0 &&
226
ldt_info.read_exec_only == 1 &&
227
ldt_info.seg_32bit == 0 &&
228
ldt_info.limit_in_pages == 0 &&
229
ldt_info.seg_not_present == 1 &&
230
ldt_info.useable == 0 )) {
237
max_limit = HYPERVISOR_VIRT_START - ldt_info.base_addr;
238
if ( ldt_info.limit_in_pages )
239
max_limit >>= PAGE_SHIFT;
241
if ( (ldt_info.limit & 0xfffff) > (max_limit & 0xfffff) )
242
ldt_info.limit = max_limit;
244
entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
245
(ldt_info.limit & 0x0ffff);
246
entry_2 = (ldt_info.base_addr & 0xff000000) |
247
((ldt_info.base_addr & 0x00ff0000) >> 16) |
248
(ldt_info.limit & 0xf0000) |
249
((ldt_info.read_exec_only ^ 1) << 9) |
250
(ldt_info.contents << 10) |
251
((ldt_info.seg_not_present ^ 1) << 15) |
252
(ldt_info.seg_32bit << 22) |
253
(ldt_info.limit_in_pages << 23) |
256
entry_2 |= (ldt_info.useable << 20);
258
/* Install the new entry ... */
260
error = HYPERVISOR_update_descriptor(phys_lp, entry_1, entry_2);
263
up(&mm->context.sem);
268
asmlinkage int sys_modify_ldt(int func, void *ptr, unsigned long bytecount)
274
ret = read_ldt(ptr, bytecount);
277
ret = write_ldt(ptr, bytecount, 1);
280
ret = read_default_ldt(ptr, bytecount);
283
ret = write_ldt(ptr, bytecount, 0);