/* [<][>][^][v][top][bottom][index][help] */
DEFINITIONS
This source file includes the following definitions.
- vm_bootstrap
- getppages
- alloc_kpages
- free_kpages
- vm_tlbshootdown_all
- vm_tlbshootdown
- vm_fault
- as_create
- as_destroy
- as_activate
- as_deactivate
- as_define_region
- as_zero_region
- as_prepare_load
- as_complete_load
- as_define_stack
- as_copy
1 /*
2 * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
3 * The President and Fellows of Harvard College.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <types.h>
31 #include <kern/errno.h>
32 #include <lib.h>
33 #include <spl.h>
34 #include <spinlock.h>
35 #include <proc.h>
36 #include <current.h>
37 #include <mips/tlb.h>
38 #include <addrspace.h>
39 #include <vm.h>
40
41 /*
42 * Dumb MIPS-only "VM system" that is intended to only be just barely
43 * enough to struggle off the ground. You should replace all of this
44 * code while doing the VM assignment. In fact, starting in that
45 * assignment, this file is not included in your kernel!
46 */
47
48 /* under dumbvm, always have 48k of user stack */
49 #define DUMBVM_STACKPAGES 12
50
51 /*
52 * Wrap rma_stealmem in a spinlock.
53 */
54 static struct spinlock stealmem_lock = SPINLOCK_INITIALIZER;
55
void
vm_bootstrap(void)
{
	/*
	 * No setup is needed: dumbvm hands out memory straight from
	 * ram_stealmem() and keeps no bookkeeping of its own.
	 */
}
61
62 static
63 paddr_t
64 getppages(unsigned long npages)
65 {
66 paddr_t addr;
67
68 spinlock_acquire(&stealmem_lock);
69
70 addr = ram_stealmem(npages);
71
72 spinlock_release(&stealmem_lock);
73 return addr;
74 }
75
76 /* Allocate/free some kernel-space virtual pages */
77 vaddr_t
78 alloc_kpages(int npages)
79 {
80 paddr_t pa;
81 pa = getppages(npages);
82 if (pa==0) {
83 return 0;
84 }
85 return PADDR_TO_KVADDR(pa);
86 }
87
88 void
89 free_kpages(vaddr_t addr)
90 {
91 /* nothing - leak the memory. */
92
93 (void)addr;
94 }
95
/* TLB shootdowns are never issued under dumbvm; reaching here is a bug. */
void
vm_tlbshootdown_all(void)
{
	panic("dumbvm tried to do tlb shootdown?!\n");
}
101
/* See vm_tlbshootdown_all: dumbvm never requests shootdowns. */
void
vm_tlbshootdown(const struct tlbshootdown *ts)
{
	(void)ts;
	panic("dumbvm tried to do tlb shootdown?!\n");
}
108
/*
 * Handle a TLB fault: translate faultaddress within the current
 * process's address space and install the mapping in a free TLB slot.
 *
 * Returns 0 on success; EINVAL for an unknown fault type; EFAULT when
 * there is no process/address space or the address is outside every
 * region. Panics on VM_FAULT_READONLY (all pages are mapped writable)
 * and when the TLB has no invalid slot left.
 */
int
vm_fault(int faulttype, vaddr_t faultaddress)
{
	vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
	paddr_t paddr;
	int i;
	uint32_t ehi, elo;
	struct addrspace *as;
	int spl;

	/* Work with the base address of the faulting page. */
	faultaddress &= PAGE_FRAME;

	DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);

	switch (faulttype) {
	    case VM_FAULT_READONLY:
		/* We always create pages read-write, so we can't get this */
		panic("dumbvm: got VM_FAULT_READONLY\n");
	    case VM_FAULT_READ:
	    case VM_FAULT_WRITE:
		break;
	    default:
		return EINVAL;
	}

	if (curproc == NULL) {
		/*
		 * No process. This is probably a kernel fault early
		 * in boot. Return EFAULT so as to panic instead of
		 * getting into an infinite faulting loop.
		 */
		return EFAULT;
	}

	as = curproc_getas();
	if (as == NULL) {
		/*
		 * No address space set up. This is probably also a
		 * kernel fault early in boot.
		 */
		return EFAULT;
	}

	/* Assert that the address space has been set up properly. */
	KASSERT(as->as_vbase1 != 0);
	KASSERT(as->as_pbase1 != 0);
	KASSERT(as->as_npages1 != 0);
	KASSERT(as->as_vbase2 != 0);
	KASSERT(as->as_pbase2 != 0);
	KASSERT(as->as_npages2 != 0);
	KASSERT(as->as_stackpbase != 0);
	KASSERT((as->as_vbase1 & PAGE_FRAME) == as->as_vbase1);
	KASSERT((as->as_pbase1 & PAGE_FRAME) == as->as_pbase1);
	KASSERT((as->as_vbase2 & PAGE_FRAME) == as->as_vbase2);
	KASSERT((as->as_pbase2 & PAGE_FRAME) == as->as_pbase2);
	KASSERT((as->as_stackpbase & PAGE_FRAME) == as->as_stackpbase);

	/* Compute the virtual extent of each region and the stack. */
	vbase1 = as->as_vbase1;
	vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
	vbase2 = as->as_vbase2;
	vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
	stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
	stacktop = USERSTACK;

	/*
	 * Regions are contiguously backed, so translation is just an
	 * offset from the region's physical base.
	 */
	if (faultaddress >= vbase1 && faultaddress < vtop1) {
		paddr = (faultaddress - vbase1) + as->as_pbase1;
	}
	else if (faultaddress >= vbase2 && faultaddress < vtop2) {
		paddr = (faultaddress - vbase2) + as->as_pbase2;
	}
	else if (faultaddress >= stackbase && faultaddress < stacktop) {
		paddr = (faultaddress - stackbase) + as->as_stackpbase;
	}
	else {
		return EFAULT;
	}

	/* make sure it's page-aligned */
	KASSERT((paddr & PAGE_FRAME) == paddr);

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	/* Install the mapping in the first TLB slot not currently valid. */
	for (i=0; i<NUM_TLB; i++) {
		tlb_read(&ehi, &elo, i);
		if (elo & TLBLO_VALID) {
			continue;
		}
		ehi = faultaddress;
		/* DIRTY makes the page writable; READONLY never occurs. */
		elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
		DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
		tlb_write(ehi, elo, i);
		splx(spl);
		return 0;
	}

	/* No free slot: dumbvm does no eviction, so the fault fails. */
	kprintf("dumbvm: Ran out of TLB entries - cannot handle page fault\n");
	splx(spl);
	return EFAULT;
}
209
210 struct addrspace *
211 as_create(void)
212 {
213 struct addrspace *as = kmalloc(sizeof(struct addrspace));
214 if (as==NULL) {
215 return NULL;
216 }
217
218 as->as_vbase1 = 0;
219 as->as_pbase1 = 0;
220 as->as_npages1 = 0;
221 as->as_vbase2 = 0;
222 as->as_pbase2 = 0;
223 as->as_npages2 = 0;
224 as->as_stackpbase = 0;
225
226 return as;
227 }
228
void
as_destroy(struct addrspace *as)
{
	/*
	 * Physical frames came from ram_stealmem() and cannot be
	 * returned, so only the descriptor itself is released.
	 */
	kfree(as);
}
234
235 void
236 as_activate(void)
237 {
238 int i, spl;
239 struct addrspace *as;
240
241 as = curproc_getas();
242 #ifdef UW
243 /* Kernel threads don't have an address spaces to activate */
244 #endif
245 if (as == NULL) {
246 return;
247 }
248
249 /* Disable interrupts on this CPU while frobbing the TLB. */
250 spl = splhigh();
251
252 for (i=0; i<NUM_TLB; i++) {
253 tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
254 }
255
256 splx(spl);
257 }
258
void
as_deactivate(void)
{
	/* Nothing to do; as_activate flushes the TLB on switch-in. */
}
264
265 int
266 as_define_region(struct addrspace *as, vaddr_t vaddr, size_t sz,
267 int readable, int writeable, int executable)
268 {
269 size_t npages;
270
271 /* Align the region. First, the base... */
272 sz += vaddr & ~(vaddr_t)PAGE_FRAME;
273 vaddr &= PAGE_FRAME;
274
275 /* ...and now the length. */
276 sz = (sz + PAGE_SIZE - 1) & PAGE_FRAME;
277
278 npages = sz / PAGE_SIZE;
279
280 /* We don't use these - all pages are read-write */
281 (void)readable;
282 (void)writeable;
283 (void)executable;
284
285 if (as->as_vbase1 == 0) {
286 as->as_vbase1 = vaddr;
287 as->as_npages1 = npages;
288 return 0;
289 }
290
291 if (as->as_vbase2 == 0) {
292 as->as_vbase2 = vaddr;
293 as->as_npages2 = npages;
294 return 0;
295 }
296
297 /*
298 * Support for more than two regions is not available.
299 */
300 kprintf("dumbvm: Warning: too many regions\n");
301 return EUNIMP;
302 }
303
304 static
305 void
306 as_zero_region(paddr_t paddr, unsigned npages)
307 {
308 bzero((void *)PADDR_TO_KVADDR(paddr), npages * PAGE_SIZE);
309 }
310
/*
 * Allocate and zero the physical frames backing both regions and the
 * stack, prior to loading the executable. Must be called exactly once
 * per address space (asserted below).
 *
 * Returns 0 on success, ENOMEM if any allocation fails. NOTE: on
 * failure, frames already obtained are not released -- they cannot be,
 * since getppages() memory comes from ram_stealmem().
 */
int
as_prepare_load(struct addrspace *as)
{
	/* Frames must not already be allocated. */
	KASSERT(as->as_pbase1 == 0);
	KASSERT(as->as_pbase2 == 0);
	KASSERT(as->as_stackpbase == 0);

	as->as_pbase1 = getppages(as->as_npages1);
	if (as->as_pbase1 == 0) {
		return ENOMEM;
	}

	as->as_pbase2 = getppages(as->as_npages2);
	if (as->as_pbase2 == 0) {
		return ENOMEM;
	}

	as->as_stackpbase = getppages(DUMBVM_STACKPAGES);
	if (as->as_stackpbase == 0) {
		return ENOMEM;
	}

	/* Zero everything so the process never sees stale memory. */
	as_zero_region(as->as_pbase1, as->as_npages1);
	as_zero_region(as->as_pbase2, as->as_npages2);
	as_zero_region(as->as_stackpbase, DUMBVM_STACKPAGES);

	return 0;
}
339
/* Called after loading finishes; dumbvm has no post-load work to do. */
int
as_complete_load(struct addrspace *as)
{
	(void)as;
	return 0;
}
346
347 int
348 as_define_stack(struct addrspace *as, vaddr_t *stackptr)
349 {
350 KASSERT(as->as_stackpbase != 0);
351
352 *stackptr = USERSTACK;
353 return 0;
354 }
355
356 int
357 as_copy(struct addrspace *old, struct addrspace **ret)
358 {
359 struct addrspace *new;
360
361 new = as_create();
362 if (new==NULL) {
363 return ENOMEM;
364 }
365
366 new->as_vbase1 = old->as_vbase1;
367 new->as_npages1 = old->as_npages1;
368 new->as_vbase2 = old->as_vbase2;
369 new->as_npages2 = old->as_npages2;
370
371 /* (Mis)use as_prepare_load to allocate some physical memory. */
372 if (as_prepare_load(new)) {
373 as_destroy(new);
374 return ENOMEM;
375 }
376
377 KASSERT(new->as_pbase1 != 0);
378 KASSERT(new->as_pbase2 != 0);
379 KASSERT(new->as_stackpbase != 0);
380
381 memmove((void *)PADDR_TO_KVADDR(new->as_pbase1),
382 (const void *)PADDR_TO_KVADDR(old->as_pbase1),
383 old->as_npages1*PAGE_SIZE);
384
385 memmove((void *)PADDR_TO_KVADDR(new->as_pbase2),
386 (const void *)PADDR_TO_KVADDR(old->as_pbase2),
387 old->as_npages2*PAGE_SIZE);
388
389 memmove((void *)PADDR_TO_KVADDR(new->as_stackpbase),
390 (const void *)PADDR_TO_KVADDR(old->as_stackpbase),
391 DUMBVM_STACKPAGES*PAGE_SIZE);
392
393 *ret = new;
394 return 0;
395 }