/*
 * Core kernel-level thread system.
 */

#define THREADINLINE

#include <types.h>
#include <kern/errno.h>
#include <lib.h>
#include <array.h>
#include <cpu.h>
#include <spl.h>
#include <spinlock.h>
#include <wchan.h>
#include <thread.h>
#include <threadlist.h>
#include <threadprivate.h>
#include <proc.h>
#include <current.h>
#include <synch.h>
#include <addrspace.h>
#include <mainbus.h>
#include <vnode.h>

#include "opt-synchprobs.h"

/* Magic number used as a guard value on kernel thread stacks. */
#define THREAD_STACK_MAGIC 0xbaadf00d

/* Wait channel. Sleeping threads are queued on wc_threads, under wc_lock. */
struct wchan {
        const char *wc_name;            /* name for this channel */
        struct threadlist wc_threads;   /* list of waiting threads */
        struct spinlock wc_lock;        /* lock for mutual exclusion */
};

/* Master array of CPUs. */
DECLARRAY(cpu);
DEFARRAY(cpu, );
static struct cpuarray allcpus;

/* Used to wait for secondary CPUs to come online. */
static struct semaphore *cpu_startup_sem;
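
/*
 * Stick a magic number on the bottom end of the stack. This will
 * (sometimes) catch kernel stack overflows. Use thread_checkstack()
 * to test this.
 */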
static
void
thread_checkstack_init(struct thread *thread)
{
        ((uint32_t *)thread->t_stack)[0] = THREAD_STACK_MAGIC;
        ((uint32_t *)thread->t_stack)[1] = THREAD_STACK_MAGIC;
        ((uint32_t *)thread->t_stack)[2] = THREAD_STACK_MAGIC;
        ((uint32_t *)thread->t_stack)[3] = THREAD_STACK_MAGIC;
}
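
/*
 * Check the magic number we put on the bottom end of the stack in
 * thread_checkstack_init. If these assertions go off, it most likely
 * means you overflowed your stack at some point, which can cause all
 * kinds of mysterious other things to happen.
 *
 * Note that when t_stack is NULL, which is the case if the stack
 * cannot be freed (which in turn is the case if the stack is the boot
 * stack, and the thread is the boot thread) this does not do anything.
 */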
static
void
thread_checkstack(struct thread *thread)
{
        if (thread->t_stack != NULL) {
                KASSERT(((uint32_t*)thread->t_stack)[0] == THREAD_STACK_MAGIC);
                KASSERT(((uint32_t*)thread->t_stack)[1] == THREAD_STACK_MAGIC);
                KASSERT(((uint32_t*)thread->t_stack)[2] == THREAD_STACK_MAGIC);
                KASSERT(((uint32_t*)thread->t_stack)[3] == THREAD_STACK_MAGIC);
        }
}
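
/*
 * Create a thread. This is used both to create a first thread
 * for each CPU and to create subsequent forked threads.
 */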
static
struct thread *
thread_create(const char *name)
{
        struct thread *thread;

        DEBUGASSERT(name != NULL);

        thread = kmalloc(sizeof(*thread));
        if (thread == NULL) {
                return NULL;
        }

        thread->t_name = kstrdup(name);
        if (thread->t_name == NULL) {
                kfree(thread);
                return NULL;
        }
        thread->t_wchan_name = "NEW";
        thread->t_state = S_READY;

        /* Thread subsystem fields */
        thread_machdep_init(&thread->t_machdep);
        threadlistnode_init(&thread->t_listnode, thread);
        thread->t_stack = NULL;
        thread->t_context = NULL;
        thread->t_cpu = NULL;
        thread->t_proc = NULL;

        /* Interrupt state fields */
        thread->t_in_interrupt = false;
        thread->t_curspl = IPL_HIGH;
        thread->t_iplhigh_count = 1;

        /* If you add to struct thread, be sure to initialize here */

        return thread;
}
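
/*
 * Create a CPU structure. This is used for the bootup CPU and
 * also for secondary CPUs.
 *
 * The hardware number (the number assigned by firmware or system
 * board config or whatnot) is tracked separately because it is not
 * necessarily anything sane or meaningful.
 */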
struct cpu *
cpu_create(unsigned hardware_number)
{
        struct cpu *c;
        int result;
        char namebuf[16];

        c = kmalloc(sizeof(*c));
        if (c == NULL) {
                panic("cpu_create: Out of memory\n");
        }

        c->c_self = c;
        c->c_hardware_number = hardware_number;

        c->c_curthread = NULL;
        threadlist_init(&c->c_zombies);
        c->c_hardclocks = 0;

        c->c_isidle = false;
        threadlist_init(&c->c_runqueue);
        spinlock_init(&c->c_runqueue_lock);

        c->c_ipi_pending = 0;
        c->c_numshootdown = 0;
        spinlock_init(&c->c_ipi_lock);

        result = cpuarray_add(&allcpus, c, &c->c_number);
        if (result != 0) {
                panic("cpu_create: array_add: %s\n", strerror(result));
        }

        /* Create the cpu's startup thread and attach it to the kernel proc. */
        snprintf(namebuf, sizeof(namebuf), "<boot #%d>", c->c_number);
        c->c_curthread = thread_create(namebuf);
        if (c->c_curthread == NULL) {
                panic("cpu_create: thread_create failed\n");
        }
        result = proc_addthread(kproc, c->c_curthread);
        if (result) {
                panic("cpu_create: proc_addthread: %s\n", strerror(result));
        }

        if (c->c_number == 0) {
                /*
                 * Leave c->c_curthread->t_stack NULL for the boot
                 * cpu. This means we're using the boot stack, which
                 * can't be freed.
                 */
        }
        else {
                c->c_curthread->t_stack = kmalloc(STACK_SIZE);
                if (c->c_curthread->t_stack == NULL) {
                        panic("cpu_create: couldn't allocate stack");
                }
                thread_checkstack_init(c->c_curthread);
        }
        c->c_curthread->t_cpu = c;

        cpu_machdep_init(c);

        return c;
}
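
/*
 * Destroy a thread.
 *
 * This function cannot be called in the victim thread's own context.
 * Nor can it be called on a running thread.
 *
 * (Freeing the stack you're actually using to run is ... inadvisable.)
 */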
static
void
thread_destroy(struct thread *thread)
{
        KASSERT(thread != curthread);
        KASSERT(thread->t_state != S_RUN);

        /*
         * If you add things to struct thread, be sure to clean them
         * up either here or in thread_exit(). (And not both...)
         */

        /* Thread subsystem fields */
        KASSERT(thread->t_proc == NULL);
        if (thread->t_stack != NULL) {
                kfree(thread->t_stack);
        }
        threadlistnode_cleanup(&thread->t_listnode);
        thread_machdep_cleanup(&thread->t_machdep);

        /* sheer paranoia */
        thread->t_wchan_name = "DESTROYED";

        kfree(thread->t_name);
        kfree(thread);
}
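
/*
 * Clean up zombies. (Zombies are threads that have exited but still
 * need to have thread_destroy called on them.)
 *
 * The list of zombies is per-cpu.
 */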
static
void
exorcise(void)
{
        struct thread *z;

        while ((z = threadlist_remhead(&curcpu->c_zombies)) != NULL) {
                KASSERT(z != curthread);
                KASSERT(z->t_state == S_ZOMBIE);
                thread_destroy(z);
        }
}
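
/*
 * On panic, stop the thread system (as much as is reasonably
 * possible) to make sure we don't end up letting any other threads
 * run.
 */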
void
thread_panic(void)
{
        /*
         * Kill off other CPUs.
         *
         * We could wait for them to stop, except that they might not.
         */
        ipi_broadcast(IPI_PANIC);

        /*
         * Drop runnable threads on the floor.
         *
         * Don't try to get the run queue lock; we might not be able
         * to. Instead, blat the list structure by hand, and take the
         * risk that it might not be quite atomic.
         */
        curcpu->c_runqueue.tl_count = 0;
        curcpu->c_runqueue.tl_head.tln_next = NULL;
        curcpu->c_runqueue.tl_tail.tln_prev = NULL;

        /*
         * Ignore the other fields of the run queue structure; with
         * the list cleared, nothing else will be scheduled on this
         * cpu.
         */
}
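
/*
 * At system shutdown, ask the other CPUs to switch off.
 */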
void
thread_shutdown(void)
{
        /*
         * Stop the other CPUs.
         *
         * We should probably wait for them to stop and shut them off
         * on the system board.
         */
        ipi_broadcast(IPI_OFFLINE);
}
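
/*
 * Thread system initialization.
 */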
void
thread_bootstrap(void)
{
        struct cpu *bootcpu;
        struct thread *bootthread;

        cpuarray_init(&allcpus);

        /*
         * Create the cpu structure for the bootup CPU, the one we're
         * currently running on. Assume the hardware number is 0; that
         * might not be true on all platforms, but it probably is for
         * all platforms we care about.
         */
        bootcpu = cpu_create(0);
        bootthread = bootcpu->c_curthread;

        /*
         * Initializing curcpu and curthread is machine-dependent
         * because either of curcpu and curthread might be defined in
         * terms of the other.
         */
        INIT_CURCPU(bootcpu, bootthread);

        /*
         * Now make sure both t_cpu and c_curthread are set. This
         * might be partially redundant with INIT_CURCPU depending on
         * how things are defined.
         */
        curthread->t_cpu = curcpu;
        curcpu->c_curthread = curthread;

        /* cpu_create() should have attached the thread to kproc. */
        KASSERT(curthread->t_proc != NULL);

        /* Done */
}
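
/*
 * New CPUs come here once MD initialization is finished. curthread
 * and curcpu should already be initialized.
 *
 * Other than clearing thread_start_cpus() to continue, we don't need
 * to do anything. The startup thread can just exit; we only need it
 * to be able to get into thread_switch() properly.
 */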
void
cpu_hatch(unsigned software_number)
{
        KASSERT(curcpu != NULL);
        KASSERT(curthread != NULL);
        KASSERT(curcpu->c_number == software_number);

        spl0();

        kprintf("cpu%u: %s\n", software_number, cpu_identify());

        V(cpu_startup_sem);
        thread_exit();
}
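
/*
 * Start up secondary cpus. Called from boot().
 */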
void
thread_start_cpus(void)
{
        unsigned i;

        kprintf("cpu0: %s\n", cpu_identify());

        cpu_startup_sem = sem_create("cpu_hatch", 0);
        mainbus_start_cpus();

        /* Wait for all but the boot cpu to check in from cpu_hatch(). */
        for (i=0; i<cpuarray_num(&allcpus) - 1; i++) {
                P(cpu_startup_sem);
        }
        sem_destroy(cpu_startup_sem);
        cpu_startup_sem = NULL;
}
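
/*
 * Make a thread runnable.
 *
 * targetcpu might be curcpu; it might not be, too.
 */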
static
void
thread_make_runnable(struct thread *target, bool already_have_lock)
{
        struct cpu *targetcpu;
        bool isidle;

        /* Lock the run queue of the target thread's cpu. */
        targetcpu = target->t_cpu;

        if (already_have_lock) {
                /* The target thread's cpu should be already locked. */
                KASSERT(spinlock_do_i_hold(&targetcpu->c_runqueue_lock));
        }
        else {
                spinlock_acquire(&targetcpu->c_runqueue_lock);
        }

        isidle = targetcpu->c_isidle;
        threadlist_addtail(&targetcpu->c_runqueue, target);
        if (isidle) {
                /*
                 * Other processor is idle; send interrupt to make
                 * sure it unidles.
                 */
                ipi_send(targetcpu, IPI_UNIDLE);
        }

        if (!already_have_lock) {
                spinlock_release(&targetcpu->c_runqueue_lock);
        }
}
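
/*
 * Create a new thread based on an existing one.
 *
 * The new thread has name NAME, and starts executing in function
 * ENTRYPOINT. DATA1 and DATA2 are passed to ENTRYPOINT.
 *
 * The new thread is given the process PROC. If PROC is null, the
 * process is inherited from the caller. The new thread starts on the
 * same CPU as the caller.
 *
 * Returns 0 on success, or an errno value on failure. A minimal
 * calling sketch (caller names are hypothetical):
 *
 *      result = thread_fork("worker", NULL, worker_main, NULL, 0);
 *      if (result) {
 *              return result;
 *      }
 */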
int
thread_fork(const char *name,
            struct proc *proc,
            void (*entrypoint)(void *data1, unsigned long data2),
            void *data1, unsigned long data2)
{
        struct thread *newthread;
        int result;

#ifdef UW
        DEBUG(DB_THREADS, "Forking thread: %s\n", name);
#endif // UW

        newthread = thread_create(name);
        if (newthread == NULL) {
                return ENOMEM;
        }

        /* Allocate a stack */
        newthread->t_stack = kmalloc(STACK_SIZE);
        if (newthread->t_stack == NULL) {
                thread_destroy(newthread);
                return ENOMEM;
        }
        thread_checkstack_init(newthread);

        /*
         * Now we clone various fields from the parent thread.
         */

        /* Thread subsystem fields */
        newthread->t_cpu = curthread->t_cpu;

        /* Attach the new thread to its process */
        if (proc == NULL) {
                proc = curthread->t_proc;
        }
        result = proc_addthread(proc, newthread);
        if (result) {
                /* thread_destroy will clean up the stack */
                thread_destroy(newthread);
                return result;
        }

        /*
         * Because new threads come out holding the cpu runqueue lock
         * (see notes at bottom of thread_switch), we need to account
         * for the spllower() that will be done releasing it.
         */
        newthread->t_iplhigh_count++;

        /* Set up the switchframe so entrypoint() gets called */
        switchframe_init(newthread, entrypoint, data1, data2);

        /* Lock the current cpu's run queue and make the new thread runnable */
        thread_make_runnable(newthread, false);

        return 0;
}
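
/*
 * High level, machine-independent context switch code.
 *
 * The current thread is queued appropriately and its state is changed
 * to NEWSTATE; another thread to run is selected and switched to.
 *
 * If NEWSTATE is S_SLEEP, the thread is queued on the wait channel
 * WC. Otherwise WC should be NULL.
 */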
static
void
thread_switch(threadstate_t newstate, struct wchan *wc)
{
        struct thread *cur, *next;
        int spl;

        DEBUGASSERT(curcpu->c_curthread == curthread);
        DEBUGASSERT(curthread->t_cpu == curcpu->c_self);

        /* Explicitly disable interrupts on this processor */
        spl = splhigh();

        cur = curthread;

        /*
         * If we're idle, return without doing anything. This happens
         * when the timer interrupt interrupts the idle loop.
         */
        if (curcpu->c_isidle) {
                splx(spl);
                return;
        }

        /* Check the stack guard band. */
        thread_checkstack(cur);

        /* Lock the run queue. */
        spinlock_acquire(&curcpu->c_runqueue_lock);

        /* Micro-optimization: if nothing to do, just return */
        if (newstate == S_READY && threadlist_isempty(&curcpu->c_runqueue)) {
                spinlock_release(&curcpu->c_runqueue_lock);
                splx(spl);
                return;
        }

        /* Put the thread in the right place. */
        switch (newstate) {
            case S_RUN:
                panic("Illegal S_RUN in thread_switch\n");
            case S_READY:
                thread_make_runnable(cur, true /*have lock*/);
                break;
            case S_SLEEP:
                cur->t_wchan_name = wc->wc_name;
                /*
                 * Add the thread to the list in the wait channel, and
                 * unlock same. To avoid a race with someone else
                 * calling wchan_wake*, we must keep the wchan locked
                 * from the point the caller of wchan_sleep locked it
                 * until the thread is on the list.
                 */
                threadlist_addtail(&wc->wc_threads, cur);
                wchan_unlock(wc);
                break;
            case S_ZOMBIE:
                cur->t_wchan_name = "ZOMBIE";
                threadlist_addtail(&curcpu->c_zombies, cur);
                break;
        }
        cur->t_state = newstate;

        /*
         * Get the next thread. While there isn't one, call cpu_idle().
         * curcpu->c_isidle must be true when cpu_idle is called.
         * Unlock the run queue while idling too, so other processors
         * can place threads on it (see thread_make_runnable).
         */
        curcpu->c_isidle = true;
        do {
                next = threadlist_remhead(&curcpu->c_runqueue);
                if (next == NULL) {
                        spinlock_release(&curcpu->c_runqueue_lock);
                        cpu_idle();
                        spinlock_acquire(&curcpu->c_runqueue_lock);
                }
        } while (next == NULL);
        curcpu->c_isidle = false;

        /*
         * Update both curcpu->c_curthread and curthread; depending on
         * the platform they may or may not be the same variable.
         */
        curcpu->c_curthread = next;
        curthread = next;

        /* do the switch (in assembler, in switch.S) */
        switchframe_switch(&cur->t_context, &next->t_context);

        /*
         * When we get here, we are either running in the next thread,
         * or have come back to the same thread again, depending on
         * how you look at it. That is, switchframe_switch returns
         * immediately in another thread's context, and because part
         * of that context is the stack, `cur' now names whatever
         * thread is running on this cpu. This thread's own context
         * does not come back until some other thread switches to it.
         *
         * Either way, we still hold this cpu's run queue lock:
         * threads keep it across the switch, and brand-new threads
         * are set up as if they held it too (see thread_fork and
         * thread_startup).
         */

        /* Clear the wait channel and set the thread state. */
        cur->t_wchan_name = NULL;
        cur->t_state = S_RUN;

        /* Unlock the run queue. */
        spinlock_release(&curcpu->c_runqueue_lock);

        /* Activate our address space in the MMU. */
        as_activate();

        /* Clean up dead threads. */
        exorcise();

        /* Turn interrupts back on. */
        splx(spl);
}
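
/*
 * This function is where new threads start running. The arguments
 * ENTRYPOINT, DATA1, and DATA2 are passed through from thread_fork.
 *
 * Because new code comes here from inside the middle of
 * thread_switch, the beginning part of this function must match the
 * tail of thread_switch.
 */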
void
thread_startup(void (*entrypoint)(void *data1, unsigned long data2),
               void *data1, unsigned long data2)
{
        struct thread *cur;

        cur = curthread;

        /* Clear the wait channel and set the thread state. */
        cur->t_wchan_name = NULL;
        cur->t_state = S_RUN;

        /* Release the runqueue lock acquired in thread_switch. */
        spinlock_release(&curcpu->c_runqueue_lock);

        /* Activate our address space in the MMU. */
        as_activate();

        /* Clean up dead threads. */
        exorcise();

        /* Enable interrupts. */
        spl0();

#if OPT_SYNCHPROBS
        /* Yield a random number of times to get a good mix of threads. */
        {
                int i, n;
                n = random()%161 + random()%161;
                for (i=0; i<n; i++) {
                        thread_yield();
                }
        }
#endif

        /* Call the function. */
        entrypoint(data1, data2);

        /* Done. */
        thread_exit();
}
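
/*
 * Cause the current thread to exit.
 *
 * The parts of the thread structure we don't actually need to run
 * should be cleaned up right away. The rest has to wait until
 * thread_destroy is called from exorcise().
 *
 * Does not return.
 */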
void
thread_exit(void)
{
        struct thread *cur;

        cur = curthread;

#ifdef UW
        /* threads for user processes should have detached from their
           process in sys__exit */
        KASSERT(curproc == kproc || curproc == NULL);
        /* kernel threads don't go through sys__exit, so we detach
           them from the kernel process here */
        if (curproc == kproc) {
                proc_remthread(cur);
        }
#else // UW
        proc_remthread(cur);
#endif // UW

        /* Make sure we *are* detached (move this only if you're sure!) */
        KASSERT(cur->t_proc == NULL);

        /* Check the stack guard band. */
        thread_checkstack(cur);

        /* Interrupts off on this processor */
        splhigh();
        thread_switch(S_ZOMBIE, NULL);
        panic("The zombie walks!\n");
}
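
/*
 * Yield the cpu to another process, but stay runnable.
 */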
void
thread_yield(void)
{
        thread_switch(S_READY, NULL);
}
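
/*
 * Scheduler.
 *
 * This is called periodically from hardclock(). It should reshuffle
 * the current CPU's run queue by job priority.
 */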
void
schedule(void)
{
        /*
         * You can write this. If we do nothing, threads will run in
         * round-robin fashion.
         */
}
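
/*
 * Thread migration.
 *
 * This is also called periodically from hardclock(). If the current
 * CPU is busy and other CPUs are idle, or less busy, it should move
 * threads across to those other CPUs.
 *
 * Migrating threads isn't free because of cache affinity; a thread's
 * working cache set will end up having to be moved to the other CPU,
 * which is fairly slow. The tradeoff between this performance loss
 * and the performance loss due to underutilization of some CPUs is
 * something that needs to be tuned and probably is workload-specific.
 *
 * For here and now, because we know we're running on System/161 and
 * System/161 does not (yet) model such cache effects, we'll be very
 * aggressive.
 */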
void
thread_consider_migration(void)
{
        unsigned my_count, total_count, one_share, to_send;
        unsigned i, numcpus;
        struct cpu *c;
        struct threadlist victims;
        struct thread *t;

        /* Count the runnable threads, per cpu and in total. */
        my_count = total_count = 0;
        numcpus = cpuarray_num(&allcpus);
        for (i=0; i<numcpus; i++) {
                c = cpuarray_get(&allcpus, i);
                spinlock_acquire(&c->c_runqueue_lock);
                total_count += c->c_runqueue.tl_count;
                if (c == curcpu->c_self) {
                        my_count = c->c_runqueue.tl_count;
                }
                spinlock_release(&c->c_runqueue_lock);
        }

        /* If we have no more than an even share, there's nothing to give. */
        one_share = DIVROUNDUP(total_count, numcpus);
        if (my_count < one_share) {
                return;
        }

        /* Pull our surplus threads off the back of our run queue. */
        to_send = my_count - one_share;
        threadlist_init(&victims);
        spinlock_acquire(&curcpu->c_runqueue_lock);
        for (i=0; i<to_send; i++) {
                t = threadlist_remtail(&curcpu->c_runqueue);
                threadlist_addhead(&victims, t);
        }
        spinlock_release(&curcpu->c_runqueue_lock);

        /* Hand the victims out to any cpu that's below an even share. */
        for (i=0; i < numcpus && to_send > 0; i++) {
                c = cpuarray_get(&allcpus, i);
                if (c == curcpu->c_self) {
                        continue;
                }
                spinlock_acquire(&c->c_runqueue_lock);
                while (c->c_runqueue.tl_count < one_share && to_send > 0) {
                        t = threadlist_remhead(&victims);
                        /*
                         * Ordinarily, curthread will not appear on
                         * the run queue. However, it can under the
                         * following circumstances:
                         *   - it went to sleep;
                         *   - the processor became idle, so it
                         *     remains curthread;
                         *   - it was reawakened, so it was put on
                         *     the run queue;
                         *   - and the processor hasn't fully
                         *     unidled yet, so all these things are
                         *     still true.
                         *
                         * If the timer interrupt happens at (almost)
                         * any point in the above sequence, we can
                         * end up here and see curthread among the
                         * victims. Migrating curthread would be a
                         * bad idea (we are running on its stack!),
                         * so put it back and account for it instead.
                         */
                        if (t == curthread) {
                                threadlist_addtail(&victims, t);
                                to_send--;
                                continue;
                        }

                        t->t_cpu = c;
                        threadlist_addtail(&c->c_runqueue, t);
                        DEBUG(DB_THREADS,
                              "Migrated thread %s: cpu %u -> %u",
                              t->t_name, curcpu->c_number, c->c_number);
                        to_send--;
                        if (c->c_isidle) {
                                /*
                                 * Other processor is idle; send
                                 * interrupt to make sure it unidles.
                                 */
                                ipi_send(c, IPI_UNIDLE);
                        }
                }
                spinlock_release(&c->c_runqueue_lock);
        }

        /*
         * Because the code above isn't atomic, we may end up with
         * leftover victims (including, possibly, curthread). Put
         * them back on our own run queue.
         */
        if (!threadlist_isempty(&victims)) {
                spinlock_acquire(&curcpu->c_runqueue_lock);
                while ((t = threadlist_remhead(&victims)) != NULL) {
                        threadlist_addtail(&curcpu->c_runqueue, t);
                }
                spinlock_release(&curcpu->c_runqueue_lock);
        }

        KASSERT(threadlist_isempty(&victims));
        threadlist_cleanup(&victims);
}
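
////////////////////////////////////////////////////////////
//
// Wait channel functions
//

/*
 * Create a wait channel. NAME is a symbolic string name for it.
 * This is what's displayed by ps -alx in Unix.
 *
 * NAME should generally be a string constant. If it isn't, alternate
 * arrangements should be made to free it after the wait channel is
 * destroyed.
 */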
struct wchan *
wchan_create(const char *name)
{
        struct wchan *wc;

        wc = kmalloc(sizeof(*wc));
        if (wc == NULL) {
                return NULL;
        }
        spinlock_init(&wc->wc_lock);
        threadlist_init(&wc->wc_threads);
        wc->wc_name = name;
        return wc;
}
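
/*
 * Destroy a wait channel. Must be empty and unlocked.
 */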
void
wchan_destroy(struct wchan *wc)
{
        spinlock_cleanup(&wc->wc_lock);
        threadlist_cleanup(&wc->wc_threads);
        kfree(wc);
}
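
/*
 * Lock and unlock a wait channel, respectively.
 */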
void
wchan_lock(struct wchan *wc)
{
        spinlock_acquire(&wc->wc_lock);
}

void
wchan_unlock(struct wchan *wc)
{
        spinlock_release(&wc->wc_lock);
}
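
/*
 * Yield the cpu to another process, and go to sleep, on the specified
 * wait channel WC. Calling wakeup on the channel will make the thread
 * runnable again. The channel must be locked, and will be *unlocked*
 * upon return.
 *
 * A sketch of the usual calling pattern, assuming a hypothetical
 * caller that guards some condition with its own spinlock sl:
 *
 *      spinlock_acquire(&sl);
 *      while (!condition) {
 *              wchan_lock(wc);
 *              spinlock_release(&sl);
 *              wchan_sleep(wc);        (unlocks wc; blocks until woken)
 *              spinlock_acquire(&sl);
 *      }
 *      spinlock_release(&sl);
 */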
void
wchan_sleep(struct wchan *wc)
{
        /* may not sleep in an interrupt handler */
        KASSERT(!curthread->t_in_interrupt);

        thread_switch(S_SLEEP, wc);
}
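
/*
 * Wake up one thread sleeping on a wait channel.
 */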
void
wchan_wakeone(struct wchan *wc)
{
        struct thread *target;

        /* Lock the channel and grab a thread from it */
        spinlock_acquire(&wc->wc_lock);
        target = threadlist_remhead(&wc->wc_threads);
        /*
         * Nobody else can wake up this thread now, so we don't need
         * to hang onto the lock.
         */
        spinlock_release(&wc->wc_lock);

        if (target == NULL) {
                /* Nobody was sleeping. */
                return;
        }

        thread_make_runnable(target, false);
}
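
/*
 * Wake up all threads sleeping on a wait channel.
 */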
void
wchan_wakeall(struct wchan *wc)
{
        struct thread *target;
        struct threadlist list;

        threadlist_init(&list);

        /*
         * Lock the channel and grab all the threads, moving them to a
         * private list.
         */
        spinlock_acquire(&wc->wc_lock);
        while ((target = threadlist_remhead(&wc->wc_threads)) != NULL) {
                threadlist_addtail(&list, target);
        }
        /*
         * Nobody else can wake up these threads now, so we don't need
         * to hang onto the lock.
         */
        spinlock_release(&wc->wc_lock);

        /*
         * We could conceivably sort the list by cpu first, to cause
         * fewer lock ops and fewer IPIs, but for now at least don't
         * bother. Just make each thread runnable.
         */
        while ((target = threadlist_remhead(&list)) != NULL) {
                thread_make_runnable(target, false);
        }

        threadlist_cleanup(&list);
}
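
/*
 * Return nonzero if there are no threads sleeping on the channel.
 * This is meant to be used only for diagnostic purposes.
 */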
bool
wchan_isempty(struct wchan *wc)
{
        bool ret;

        spinlock_acquire(&wc->wc_lock);
        ret = threadlist_isempty(&wc->wc_threads);
        spinlock_release(&wc->wc_lock);

        return ret;
}
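
////////////////////////////////////////////////////////////
//
// Machine-independent IPI handling
//

/*
 * Send an IPI (inter-processor interrupt) to the specified CPU.
 */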
void
ipi_send(struct cpu *target, int code)
{
        KASSERT(code >= 0 && code < 32);

        spinlock_acquire(&target->c_ipi_lock);
        target->c_ipi_pending |= (uint32_t)1 << code;
        mainbus_send_ipi(target);
        spinlock_release(&target->c_ipi_lock);
}

/*
 * Send an IPI to all CPUs except the current one.
 */
void
ipi_broadcast(int code)
{
        unsigned i;
        struct cpu *c;

        for (i=0; i < cpuarray_num(&allcpus); i++) {
                c = cpuarray_get(&allcpus, i);
                if (c != curcpu->c_self) {
                        ipi_send(c, code);
                }
        }
}

/*
 * Post a TLB shootdown to the specified CPU and send it an IPI. If
 * the target's shootdown queue is already full, fall back to telling
 * it to flush everything.
 */
void
ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping)
{
        int n;

        spinlock_acquire(&target->c_ipi_lock);

        n = target->c_numshootdown;
        if (n == TLBSHOOTDOWN_MAX) {
                target->c_numshootdown = TLBSHOOTDOWN_ALL;
        }
        else {
                target->c_shootdown[n] = *mapping;
                target->c_numshootdown = n+1;
        }

        target->c_ipi_pending |= (uint32_t)1 << IPI_TLBSHOOTDOWN;
        mainbus_send_ipi(target);

        spinlock_release(&target->c_ipi_lock);
}

/*
 * Handle an incoming interprocessor interrupt: check which IPI bits
 * are pending on the current cpu and act on each of them.
 */
void
interprocessor_interrupt(void)
{
        uint32_t bits;
        int i;

        spinlock_acquire(&curcpu->c_ipi_lock);
        bits = curcpu->c_ipi_pending;

        if (bits & (1U << IPI_PANIC)) {
                /* panic on another cpu - just stop dead */
                cpu_halt();
        }
        if (bits & (1U << IPI_OFFLINE)) {
                /* offline request */
                spinlock_acquire(&curcpu->c_runqueue_lock);
                if (!curcpu->c_isidle) {
                        kprintf("cpu%d: offline: warning: not idle\n",
                                curcpu->c_number);
                }
                spinlock_release(&curcpu->c_runqueue_lock);
                kprintf("cpu%d: offline.\n", curcpu->c_number);
                cpu_halt();
        }
        if (bits & (1U << IPI_UNIDLE)) {
                /*
                 * The cpu has already unidled itself to take the
                 * interrupt; don't need to do anything else.
                 */
        }
        if (bits & (1U << IPI_TLBSHOOTDOWN)) {
                if (curcpu->c_numshootdown == TLBSHOOTDOWN_ALL) {
                        vm_tlbshootdown_all();
                }
                else {
                        for (i=0; i<curcpu->c_numshootdown; i++) {
                                vm_tlbshootdown(&curcpu->c_shootdown[i]);
                        }
                }
                curcpu->c_numshootdown = 0;
        }

        curcpu->c_ipi_pending = 0;
        spinlock_release(&curcpu->c_ipi_lock);
}