/* [<][>][^][v][top][bottom][index][help] */
DEFINITIONS
This source file includes the following definitions.
- read_cfg_register
- write_cfg_register
- read_ctl_register
- write_ctl_register
- write_ctlcpu_register
- lamebus_find_cpus
- lamebus_start_cpus
- lamebus_probe
- lamebus_mark
- lamebus_unmark
- lamebus_attach_interrupt
- lamebus_detach_interrupt
- lamebus_mask_interrupt
- lamebus_unmask_interrupt
- lamebus_interrupt
- lamebus_poweroff
- lamebus_ramsize
- lamebus_assert_ipi
- lamebus_clear_ipi
- lamebus_init
1 /*
2 * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
3 * The President and Fellows of Harvard College.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 /*
31 * Machine-independent LAMEbus code.
32 */
33
34 #include <types.h>
35 #include <lib.h>
36 #include <cpu.h>
37 #include <spinlock.h>
38 #include <current.h>
39 #include <lamebus/lamebus.h>
40
41 /* Register offsets within each config region */
42 #define CFGREG_VID 0 /* Vendor ID */
43 #define CFGREG_DID 4 /* Device ID */
44 #define CFGREG_DRL 8 /* Device Revision Level */
45
46 /* LAMEbus controller private registers (offsets within its config region) */
47 #define CTLREG_RAMSZ 0x200
48 #define CTLREG_IRQS 0x204
49 #define CTLREG_PWR 0x208
50 #define CTLREG_IRQE 0x20c
51 #define CTLREG_CPUS 0x210
52 #define CTLREG_CPUE 0x214
53 #define CTLREG_SELF 0x218
54
55 /* LAMEbus CPU control registers (offsets within each per-cpu region) */
56 #define CTLCPU_CIRQE 0x000
57 #define CTLCPU_CIPI 0x004
58 #define CTLCPU_CRAM 0x300
59
60
61 /*
62 * Read a config register for the given slot.
63 */
64 static
65 inline
66 uint32_t
67 read_cfg_register(struct lamebus_softc *lb, int slot, uint32_t offset)
68 {
69 /* Note that lb might be NULL on some platforms in some contexts. */
70 offset += LB_CONFIG_SIZE*slot;
71 return lamebus_read_register(lb, LB_CONTROLLER_SLOT, offset);
72 }
73
74 /*
75 * Write a config register for a given slot.
76 */
77 static
78 inline
79 void
80 write_cfg_register(struct lamebus_softc *lb, int slot, uint32_t offset,
81 uint32_t val)
82 {
83 offset += LB_CONFIG_SIZE*slot;
84 lamebus_write_register(lb, LB_CONTROLLER_SLOT, offset, val);
85 }
86
87 /*
88 * Read one of the bus controller's registers.
89 */
90 static
91 inline
92 uint32_t
93 read_ctl_register(struct lamebus_softc *lb, uint32_t offset)
94 {
95 /* Note that lb might be NULL on some platforms in some contexts. */
96 return read_cfg_register(lb, LB_CONTROLLER_SLOT, offset);
97 }
98
99 /*
100 * Write one of the bus controller's registers.
101 */
102 static
103 inline
104 void
105 write_ctl_register(struct lamebus_softc *lb, uint32_t offset, uint32_t val)
106 {
107 write_cfg_register(lb, LB_CONTROLLER_SLOT, offset, val);
108 }
109
110 /*
111 * Write one of the bus controller's CPU control registers.
112 */
113 static
114 inline
115 void
116 write_ctlcpu_register(struct lamebus_softc *lb, unsigned hw_cpunum,
117 uint32_t offset, uint32_t val)
118 {
119 offset += LB_CTLCPU_OFFSET + hw_cpunum * LB_CTLCPU_SIZE;
120 lamebus_write_register(lb, LB_CONTROLLER_SLOT, offset, val);
121 }
122
123 /*
124 * Find and create secondary CPUs.
125 */
126 void
127 lamebus_find_cpus(struct lamebus_softc *lamebus)
128 {
129 uint32_t cpumask, self, bit, val;
130 unsigned i, numcpus, bootcpu;
131 unsigned hwnum[32];
132
133 cpumask = read_ctl_register(lamebus, CTLREG_CPUS);
134 self = read_ctl_register(lamebus, CTLREG_SELF);
135
136 numcpus = 0;
137 bootcpu = 0;
138 for (i=0; i<32; i++) {
139 bit = (uint32_t)1 << i;
140 if ((cpumask & bit) != 0) {
141 if (self & bit) {
142 bootcpu = numcpus;
143 curcpu->c_hardware_number = i;
144 }
145 hwnum[numcpus] = i;
146 numcpus++;
147 }
148 }
149
150 for (i=0; i<numcpus; i++) {
151 if (i != bootcpu) {
152 cpu_create(hwnum[i]);
153 }
154 }
155
156 /*
157 * By default, route all interrupts only to the boot cpu. We
158 * could be arbitrarily more elaborate, up to things like
159 * dynamic load balancing.
160 */
161
162 for (i=0; i<numcpus; i++) {
163 if (i != bootcpu) {
164 val = 0;
165 }
166 else {
167 val = 0xffffffff;
168 }
169 write_ctlcpu_register(lamebus, hwnum[i], CTLCPU_CIRQE, val);
170 }
171 }
172
173 /*
174 * Start up secondary CPUs.
175 *
176 * The first word of the CRAM area is set to the entry point for new
177 * CPUs; the second to the (software) CPU number. Note that the logic
178 * here assumes the boot CPU is CPU 0 and the others are 1-N as
179 * created in the function above. This is fine if all CPUs are on
180 * LAMEbus; if in some environment there are other CPUs about as well
181 * this logic will have to be made more complex.
182 */
183 void
184 lamebus_start_cpus(struct lamebus_softc *lamebus)
185 {
186 uint32_t cpumask, self, bit;
187 uint32_t ctlcpuoffset;
188 uint32_t *cram;
189 unsigned i;
190 unsigned cpunum;
191
192 cpumask = read_ctl_register(lamebus, CTLREG_CPUS);
193 self = read_ctl_register(lamebus, CTLREG_SELF);
194
195 /* Poke in the startup address. */
196 cpunum = 1;
197 for (i=0; i<32; i++) {
198 bit = (uint32_t)1 << i;
199 if ((cpumask & bit) != 0) {
200 if (self & bit) {
201 continue;
202 }
203 ctlcpuoffset = LB_CTLCPU_OFFSET + i * LB_CTLCPU_SIZE;
204 cram = lamebus_map_area(lamebus,
205 LB_CONTROLLER_SLOT,
206 ctlcpuoffset + CTLCPU_CRAM);
207 cram[0] = (uint32_t)cpu_start_secondary;
208 cram[1] = cpunum++;
209 }
210 }
211
212 /* Now, enable them all. */
213 write_ctl_register(lamebus, CTLREG_CPUE, cpumask);
214 }
215
216 /*
217 * Probe function.
218 *
219 * Given a LAMEbus, look for a device that's not already been marked
220 * in use, has the specified IDs, and has a device revision level in
221 * the specified range (which is inclusive on both ends.)
222 *
223 * Returns the slot number found (0-31) or -1 if nothing suitable was
224 * found.
225 */
226
227 int
228 lamebus_probe(struct lamebus_softc *sc,
229 uint32_t vendorid, uint32_t deviceid,
230 uint32_t lowver, uint32_t highver)
231 {
232 int slot;
233 uint32_t val;
234
235 /*
236 * Because the slot information in sc is used when dispatching
237 * interrupts, disable interrupts while working with it.
238 */
239
240 spinlock_acquire(&sc->ls_lock);
241
242 for (slot=0; slot<LB_NSLOTS; slot++) {
243 if (sc->ls_slotsinuse & (1<<slot)) {
244 /* Slot already in use; skip */
245 continue;
246 }
247
248 val = read_cfg_register(sc, slot, CFGREG_VID);
249 if (val!=vendorid) {
250 /* Wrong vendor id */
251 continue;
252 }
253
254 val = read_cfg_register(sc, slot, CFGREG_DID);
255 if (val != deviceid) {
256 /* Wrong device id */
257 continue;
258 }
259
260 val = read_cfg_register(sc, slot, CFGREG_DRL);
261 if (val < lowver || val > highver) {
262 /* Unsupported device revision */
263 continue;
264 }
265
266 /* Found something */
267
268 spinlock_release(&sc->ls_lock);
269 return slot;
270 }
271
272 /* Found nothing */
273
274 spinlock_release(&sc->ls_lock);
275 return -1;
276 }
277
278 /*
279 * Mark that a slot is in use.
280 * This prevents the probe routine from returning the same device over
281 * and over again.
282 */
283 void
284 lamebus_mark(struct lamebus_softc *sc, int slot)
285 {
286 uint32_t mask = ((uint32_t)1) << slot;
287 KASSERT(slot>=0 && slot < LB_NSLOTS);
288
289 spinlock_acquire(&sc->ls_lock);
290
291 if ((sc->ls_slotsinuse & mask)!=0) {
292 panic("lamebus_mark: slot %d already in use\n", slot);
293 }
294
295 sc->ls_slotsinuse |= mask;
296
297 spinlock_release(&sc->ls_lock);
298 }
299
300 /*
301 * Mark that a slot is no longer in use.
302 */
303 void
304 lamebus_unmark(struct lamebus_softc *sc, int slot)
305 {
306 uint32_t mask = ((uint32_t)1) << slot;
307 KASSERT(slot>=0 && slot < LB_NSLOTS);
308
309 spinlock_acquire(&sc->ls_lock);
310
311 if ((sc->ls_slotsinuse & mask)==0) {
312 panic("lamebus_mark: slot %d not marked in use\n", slot);
313 }
314
315 sc->ls_slotsinuse &= ~mask;
316
317 spinlock_release(&sc->ls_lock);
318 }
319
320 /*
321 * Register a function (and a device context pointer) to be called
322 * when a particular slot signals an interrupt.
323 */
324 void
325 lamebus_attach_interrupt(struct lamebus_softc *sc, int slot,
326 void *devdata,
327 void (*irqfunc)(void *devdata))
328 {
329 uint32_t mask = ((uint32_t)1) << slot;
330 KASSERT(slot>=0 && slot < LB_NSLOTS);
331
332 spinlock_acquire(&sc->ls_lock);
333
334 if ((sc->ls_slotsinuse & mask)==0) {
335 panic("lamebus_attach_interrupt: slot %d not marked in use\n",
336 slot);
337 }
338
339 KASSERT(sc->ls_devdata[slot]==NULL);
340 KASSERT(sc->ls_irqfuncs[slot]==NULL);
341
342 sc->ls_devdata[slot] = devdata;
343 sc->ls_irqfuncs[slot] = irqfunc;
344
345 spinlock_release(&sc->ls_lock);
346 }
347
348 /*
349 * Unregister a function that was being called when a particular slot
350 * signaled an interrupt.
351 */
352 void
353 lamebus_detach_interrupt(struct lamebus_softc *sc, int slot)
354 {
355 uint32_t mask = ((uint32_t)1) << slot;
356 KASSERT(slot>=0 && slot < LB_NSLOTS);
357
358 spinlock_acquire(&sc->ls_lock);
359
360 if ((sc->ls_slotsinuse & mask)==0) {
361 panic("lamebus_detach_interrupt: slot %d not marked in use\n",
362 slot);
363 }
364
365 KASSERT(sc->ls_irqfuncs[slot]!=NULL);
366
367 sc->ls_devdata[slot] = NULL;
368 sc->ls_irqfuncs[slot] = NULL;
369
370 spinlock_release(&sc->ls_lock);
371 }
372
373 /*
374 * Mask/unmask an interrupt using the global IRQE register.
375 */
376 void
377 lamebus_mask_interrupt(struct lamebus_softc *lamebus, int slot)
378 {
379 uint32_t bits, mask = ((uint32_t)1) << slot;
380 KASSERT(slot >= 0 && slot < LB_NSLOTS);
381
382 spinlock_acquire(&lamebus->ls_lock);
383 bits = read_ctl_register(lamebus, CTLREG_IRQE);
384 bits &= ~mask;
385 write_ctl_register(lamebus, CTLREG_IRQE, bits);
386 spinlock_release(&lamebus->ls_lock);
387 }
388
389 void
390 lamebus_unmask_interrupt(struct lamebus_softc *lamebus, int slot)
391 {
392 uint32_t bits, mask = ((uint32_t)1) << slot;
393 KASSERT(slot >= 0 && slot < LB_NSLOTS);
394
395 spinlock_acquire(&lamebus->ls_lock);
396 bits = read_ctl_register(lamebus, CTLREG_IRQE);
397 bits |= mask;
398 write_ctl_register(lamebus, CTLREG_IRQE, bits);
399 spinlock_release(&lamebus->ls_lock);
400 }
401
402
/*
 * LAMEbus interrupt handling function. (Machine-independent!)
 *
 * Called when the (single) LAMEbus interrupt line is asserted; reads
 * the controller's pending-IRQ mask and dispatches to the handler
 * registered for each asserting slot. Panics if a persistent stream
 * of unhandleable ("dud") interrupts is seen, since LAMEbus
 * interrupts are level-triggered and would otherwise loop forever.
 */
void
lamebus_interrupt(struct lamebus_softc *lamebus)
{
	/*
	 * Note that despite the fact that "spl" stands for "set
	 * priority level", we don't actually support interrupt
	 * priorities. When an interrupt happens, we look through the
	 * slots to find the first interrupting device and call its
	 * interrupt routine, no matter what that device is.
	 *
	 * Note that the entire LAMEbus uses only one on-cpu interrupt line.
	 * Thus, we do not use any on-cpu interrupt priority system either.
	 */

	int slot;
	uint32_t mask;
	uint32_t irqs;
	void (*handler)(void *);
	void *data;

	/* For keeping track of how many bogus things happen in a row. */
	/* (static: persists across calls; only touched with ls_lock held) */
	static int duds = 0;
	int duds_this_time = 0;

	/* and we better have a valid bus instance. */
	KASSERT(lamebus != NULL);

	/* Lock the softc */
	spinlock_acquire(&lamebus->ls_lock);

	/*
	 * Read the LAMEbus controller register that tells us which
	 * slots are asserting an interrupt condition.
	 */
	irqs = read_ctl_register(lamebus, CTLREG_IRQS);

	if (irqs == 0) {
		/*
		 * Huh? None of them? Must be a glitch.
		 */
		kprintf("lamebus: stray interrupt on cpu %u\n",
			curcpu->c_number);
		duds++;
		duds_this_time++;

		/*
		 * We could just return now, but instead we'll
		 * continue ahead. Because irqs == 0, nothing in the
		 * loop will execute, and passing through it gets us
		 * to the code that checks how many duds we've
		 * seen. This is important, because we just might get
		 * a stray interrupt that latches itself on. If that
		 * happens, we're pretty much toast, but it's better
		 * to panic and hopefully reset the system than to
		 * loop forever printing "stray interrupt".
		 */
	}

	/*
	 * Go through the bits in the value we got back to see which
	 * ones are set.
	 */

	for (mask=1, slot=0; slot<LB_NSLOTS; mask<<=1, slot++) {
		if ((irqs & mask) == 0) {
			/* Nope. */
			continue;
		}

		/*
		 * This slot is signalling an interrupt.
		 */

		if ((lamebus->ls_slotsinuse & mask)==0) {
			/*
			 * No device driver is using this slot.
			 */
			duds++;
			duds_this_time++;
			continue;
		}

		if (lamebus->ls_irqfuncs[slot]==NULL) {
			/*
			 * The device driver hasn't installed an interrupt
			 * handler.
			 */
			duds++;
			duds_this_time++;
			continue;
		}

		/*
		 * Call the interrupt handler. Release the spinlock
		 * while we do so, in case other CPUs are handling
		 * interrupts on other devices.
		 */
		/* Copy handler/data out first: they may be detached
		 * while the lock is dropped. */
		handler = lamebus->ls_irqfuncs[slot];
		data = lamebus->ls_devdata[slot];
		spinlock_release(&lamebus->ls_lock);

		handler(data);

		spinlock_acquire(&lamebus->ls_lock);

		/*
		 * Reload the mask of pending IRQs - if we just called
		 * hardclock, we might not have come back to this
		 * context for some time, and it might have changed.
		 */

		irqs = read_ctl_register(lamebus, CTLREG_IRQS);
	}


	/*
	 * If we get interrupts for a slot with no driver or no
	 * interrupt handler, it's fairly serious. Because LAMEbus
	 * uses level-triggered interrupts, if we don't shut off the
	 * condition, we'll keep getting interrupted continuously and
	 * the system will make no progress. But we don't know how to
	 * do that if there's no driver or no interrupt handler.
	 *
	 * So, if we get too many dud interrupts, panic, since it's
	 * better to panic and reset than to hang.
	 *
	 * If we get through here without seeing any duds this time,
	 * the condition, whatever it was, has gone away. It might be
	 * some stupid device we don't have a driver for, or it might
	 * have been an electrical transient. In any case, warn and
	 * clear the dud count.
	 */

	if (duds_this_time == 0 && duds > 0) {
		kprintf("lamebus: %d dud interrupts\n", duds);
		duds = 0;
	}

	if (duds > 10000) {
		panic("lamebus: too many (%d) dud interrupts\n", duds);
	}

	/* Unlock the softc */
	spinlock_release(&lamebus->ls_lock);
}
551
552 /*
553 * Have the bus controller power the system off.
554 */
555 void
556 lamebus_poweroff(struct lamebus_softc *lamebus)
557 {
558 /*
559 * Write 0 to the power register to shut the system off.
560 */
561
562 cpu_irqoff();
563 write_ctl_register(lamebus, CTLREG_PWR, 0);
564
565 /* The power doesn't go off instantly... so halt the cpu. */
566 cpu_halt();
567 }
568
569 /*
570 * Ask the bus controller how much memory we have.
571 */
572 uint32_t
573 lamebus_ramsize(void)
574 {
575 /*
576 * Note that this has to work before bus initialization.
577 * On machines where lamebus_read_register doesn't work
578 * before bus initialization, this function can't be used
579 * for initial RAM size lookup.
580 */
581
582 return read_ctl_register(NULL, CTLREG_RAMSZ);
583 }
584
585 /*
586 * Turn on or off the interprocessor interrupt line for a given CPU.
587 */
588 void
589 lamebus_assert_ipi(struct lamebus_softc *lamebus, struct cpu *target)
590 {
591 write_ctlcpu_register(lamebus, target->c_hardware_number,
592 CTLCPU_CIPI, 1);
593 }
594
595 void
596 lamebus_clear_ipi(struct lamebus_softc *lamebus, struct cpu *target)
597 {
598 write_ctlcpu_register(lamebus, target->c_hardware_number,
599 CTLCPU_CIPI, 0);
600 }
601
602 /*
603 * Initial setup.
604 * Should be called from mainbus_bootstrap().
605 */
606 struct lamebus_softc *
607 lamebus_init(void)
608 {
609 struct lamebus_softc *lamebus;
610 int i;
611
612 /* Allocate space for lamebus data */
613 lamebus = kmalloc(sizeof(struct lamebus_softc));
614 if (lamebus==NULL) {
615 panic("lamebus_init: Out of memory\n");
616 }
617
618 spinlock_init(&lamebus->ls_lock);
619
620 /*
621 * Initialize the LAMEbus data structure.
622 */
623 lamebus->ls_slotsinuse = 1 << LB_CONTROLLER_SLOT;
624
625 for (i=0; i<LB_NSLOTS; i++) {
626 lamebus->ls_devdata[i] = NULL;
627 lamebus->ls_irqfuncs[i] = NULL;
628 }
629
630 return lamebus;
631 }