tlb.c (77bf4400319db9d2a8af6b00c2be6faa0f3d07cb -> ba180fd437156f7fd8cfb2fdd021d949eeef08d6)
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "as-layout.h"
#include "mem_user.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"
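
/*
 * Queue a host mmap of [virt, virt + len) backed by the physical page
 * at 'phys'. A request that simply extends the previous MMAP op (same
 * fd and prot, contiguous virtual addresses and file offsets) is merged
 * into it; a full op queue is flushed through do_ops() first.
 */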
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_op *ops, int *index,
		    int last_filled, union mm_context *mmu, void **flush,
		    int (*do_ops)(union mm_context *, struct host_vm_op *,
				  int, int, void **))
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (*index != -1) {
		last = &ops[*index];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (*index == last_filled) {
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MMAP,
					       .u = { .mmap = {
						       .addr = virt,
						       .len = len,

--- 8 unchanged lines hidden ---

		      struct host_vm_op *ops, int *index, int last_filled,
		      union mm_context *mmu, void **flush,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if (*index != -1) {
		last = &ops[*index];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (*index == last_filled) {
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
					       .u = { .munmap = {
						       .addr = addr,
						       .len = len } } });

--- 4 unchanged lines hidden ---

			unsigned int prot, struct host_vm_op *ops, int *index,
			int last_filled, union mm_context *mmu, void **flush,
			int (*do_ops)(union mm_context *, struct host_vm_op *,
				      int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if (*index != -1) {
		last = &ops[*index];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (*index == last_filled) {
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
					       .u = { .mprotect = {
						       .addr = addr,
						       .len = len,

--- 22 unchanged lines hidden ---

		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte)) {
			w = 0;
		}
		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, ops, op_index,
					       last_op, mmu, flush, do_ops);
			else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
					      last_op, mmu, flush, do_ops);
		}
		else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
					   last_op, mmu, flush, do_ops);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
	return ret;
}

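/*
 * Walk one level of the page tables. A range with no entry at this
 * level is munmapped on the host if it is flagged as freshly changed;
 * otherwise the walk descends to the next level. The pud and pgd
 * walkers below follow the same pattern.
 */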
static inline int update_pmd_range(pud_t *pud, unsigned long addr,

--- 6 unchanged lines hidden ---

{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, ops,
						 op_index, last_op, mmu,
						 flush, do_ops);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, ops, last_op,
					    op_index, force, mmu, flush,

--- 12 unchanged lines hidden ---

{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, ops,
						 op_index, last_op, mmu,
						 flush, do_ops);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, ops, last_op,
					    op_index, force, mmu, flush,

--- 13 unchanged lines hidden ---

	unsigned long addr = start_addr, next;
	int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
	void *flush = NULL;

	ops[0].type = NONE;
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, ops, last_op,
					    &op_index, force, mmu, &flush,
					    do_ops);
	} while (pgd++, addr = next, ((addr != end_addr) && !ret));

	if (!ret)
		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process\n");
		force_sig(SIGKILL, current);
	}
}
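
/*
 * Bring the host mappings of a kernel address range back in sync with
 * init_mm's page tables: stale pages are munmapped, replaced pages are
 * remapped, and new protections are applied. Returns nonzero if any
 * host mapping was touched.
 */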
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}

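/*
 * Push the state of a single pte out to the host via map(), unmap() or
 * protect(). Any failure is fatal for the process, since its host
 * mappings no longer match its page tables.
 */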
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.skas.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}

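/* Trivial one-level page table lookup wrappers. */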
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

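/*
 * Issue a queue of pending VM ops to the host in order. 'last' is the
 * index of the final valid op; 'finished' is passed through to tell the
 * lower layers whether this batch completes the flush.
 */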
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
		  int finished, void **flush)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i <= last && !ret; i++) {
		op = &ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(&mmu->skas.id, op->u.mmap.addr,
				  op->u.mmap.len, op->u.mmap.prot,
				  op->u.mmap.fd, op->u.mmap.offset, finished,
				  flush);
			break;
		case MUNMAP:
			ret = unmap(&mmu->skas.id, op->u.munmap.addr,
				    op->u.munmap.len, finished, flush);
			break;
		case MPROTECT:
			ret = protect(&mmu->skas.id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, flush);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			break;
		}
	}

	return ret;
}

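/*
 * Without proc_mm, the stub pages live above CONFIG_STUB_START and must
 * stay mapped, so the flush range is clipped to end below them.
 */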
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	if (!proc_mm && (end_addr > CONFIG_STUB_START))
		end_addr = CONFIG_STUB_START;

	fix_range_common(mm, start_addr, end_addr, force, do_ops);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long end;

	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	end = proc_mm ? task_size : CONFIG_STUB_START;
	fix_range(mm, 0, end, 0);
}
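
/* Force a full resync of every VMA in the current address space. */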
void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}