00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035 #include <system_data_cells.h>
00036 #include <dyndef.h>
00037 #include <phddef.h>
00038
00039 #include <linux/config.h>
00040 #include <linux/mm.h>
00041 #include <linux/init.h>
00042 #include <linux/smp_lock.h>
00043 #include <linux/nmi.h>
00044 #include <linux/interrupt.h>
00045 #include <linux/kernel_stat.h>
00046 #include <linux/completion.h>
00047 #include <linux/prefetch.h>
00048 #include <linux/compiler.h>
00049 #include <asm/uaccess.h>
00050 #include <asm/mmu_context.h>
00051 #include <sysgen.h>
00052 #include <ipldef.h>
00053 #include <ipl.h>
00054 #include <statedef.h>
00055 #include <queue.h>
00056 #include<system_service_setup.h>
00057 #include <internals.h>
00058 #include <exe_routines.h>
00059 #include <sch_routines.h>
00060 #include <misc_routines.h>
00061 #include <cpbdef.h>
00062 #include <rsndef.h>
00063 #include <statedef.h>
00064 #ifdef __arch_um__
00065 #include <asm-i386/hw_irq.h>
00066 #endif
00067
00068 #undef DEBUG_SCHED
00069
00070 #ifdef CONFIG_VMS
00071 #define DEBUG_SCHED
00072 #endif
00073 #define DEBUG_SCHED
00074
00075
00076
00077
00078
00079
00080 extern void timer_bh(void);
00081 extern void tqueue_bh(void);
00082 extern void immediate_bh(void);
00083
00084 int done_init_idle=0;
00085
00086
00087
00088
00089
00090 unsigned securebits = SECUREBITS_DEFAULT;
00091
00092 extern void mem_use(void);
00093
00094 int mydebug4 = 0;
00095 int mydebug5 = 0;
00096 int mydebug6 = 1;
00097
00098
00099
00100
00101
/*
 * mycheckaddr - debug-time consistency check of the 32 COM (computable)
 * queues headed at sch_aq_comh[].
 *
 * @ctl: 0   = plain check;
 *       42  = skip the "current PCB on a COM queue" checks;
 *       any other value is treated as a candidate "next" PCB pointer
 *       (ctl>42 numerically) that must NOT already be on a queue.
 *       NOTE(review): ctl is declared unsigned int but compared against
 *       PCB pointers - relies on pointer/int interchangeability.
 *
 * Verifies, for each priority queue: the summary bit in sch_gl_comqs
 * matches queue emptiness; every element carries the queue's priority
 * and a PCB type code; forward/backward self-links are consistent in
 * both traversal directions; and the forward count equals the backward
 * count.  Any violation panics or spins forever (deliberate debug stop).
 */
mycheckaddr(unsigned int ctl){
#if 1
int nr0=nr_running;
int i,n=0,m=0;
struct _pcb *tmp2 = 0;
unsigned long tmp;
int pan=0;
/* pass 1: forward traversal, count elements in n */
for(i=0;i<32;i++) {
tmp=&sch_aq_comh[i];
/* empty queue: head's forward link points at itself */
if(*(unsigned long *)tmp == tmp) {
if (test_bit(i,&sch_gl_comqs))
panic("testbit %x %x\n",i,sch_gl_comqs);
}
if(*(unsigned long *)tmp == tmp) {; } else {
tmp2=tmp;
do {
n++;
/* skip checks on the head sentinel itself (tmp2==tmp) */
if (tmp2!=tmp && i!=tmp2->pcb_b_pri)
panic("wrong pri in q %x %x %x\n",tmp2,i,tmp2->pcb_b_pri);
if (tmp2!=tmp && tmp2->pcb_b_type!=DYN$C_PCB)
panic("wrong type in q %x %x %x\n",tmp2,i,tmp2->pcb_b_type);
if (ctl<42 && ctl_gl_pcb==tmp2)
panic("ctl_gl_pcb on comq\n");
if (ctl>42 && ctl==tmp2)
panic("next ctl_gl_pcb on comq\n");
/* each neighbour must link back to us in both directions */
if (tmp2!=(tmp2->pcb_l_sqfl->pcb$l_sqbl)) { pan=1; goto mypanic; }
if (tmp2!=(tmp2->pcb_l_sqbl->pcb$l_sqfl)) { pan=2; goto mypanic; }
tmp2=tmp2->pcb_l_sqfl;
} while (tmp2!=tmp);
n--;
}
}
/* pass 2: backward traversal, count elements in m */
for(i=0;i<32;i++) {
tmp=&sch_aq_comh[i];
if(*(unsigned long *)tmp == tmp) {; } else {
tmp2=tmp;
do {
m++;
tmp2=tmp2->pcb_l_sqbl;
if (tmp2!=(tmp2->pcb_l_sqfl->pcb$l_sqbl)) { pan=3; goto mypanic; }
if (tmp2!=(tmp2->pcb_l_sqbl->pcb$l_sqfl)) { pan=4; goto mypanic; }
} while (tmp2!=tmp);
m--;
}
}
/* if another CPU changed nr_running while we scanned, results are stale */
if (nr0!=nr_running) { return; }
/* forward and backward counts must agree */
if (n!=m) { pan=5|(n<<4)|(m<<8); goto mypanic; }
#if 0
unsigned long * f=&sch_aq_comh[31];
unsigned long * b=&sch_aq_comt[31];
if (*f==f && *b!=f)
{ pan=6; goto mypanic; }
if (*b==f && *f!=f)
{ pan=7; goto mypanic; }
if (nr_running<2 && *b!=*f)
{pan=8; goto mypanic; }
printk("mypanic pan %x %x %x %x %x %x\n",pan,nr_running,f,b,*f,*b);
#endif
return;
mypanic:
/* dump diagnostic state, then halt the machine for inspection */
printk("mypanic pan %x %x %x %x %x %x\n",pan,i,n,m,tmp,tmp2);
printk("mypanic %x %x %x %x %x\n",tmp2->pcb_l_sqfl,tmp2->pcb$l_sqfl->pcb$l_sqbl,tmp2->pcb$l_sqbl,tmp2->pcb$l_sqbl->pcb$l_sqfl,42);
cli();
#ifdef __i386__
sickinsque(0x11111111,0x22222222);
#endif
while(1) {; };
#endif
}
00171
00172 int numproc(void) {
00173 int i,n=0;
00174 struct _pcb *tmp2;
00175 unsigned long tmp;
00176 for(i=0;i<32;i++) {
00177 tmp=&sch_aq_comh[i];
00178 if(*(unsigned long *)tmp == tmp) {; } else {
00179 tmp2=tmp;
00180 do {
00181 n++;
00182 tmp2=tmp2->pcb_l_sqfl;
00183
00184 } while (tmp2!=tmp);
00185 n--;
00186
00187 }
00188 }
00189 return n;
00190 }
00191
00192 void printcom(void) {
00193 int i;
00194 struct _pcb *tmp2;
00195 unsigned long tmp;
00196 printk(KERN_EMERG "cpusch %x\n",sch_gl_comqs);
00197 for(i=16;i<32;i++) {
00198 tmp=&sch_aq_comh[i];
00199 if(*(unsigned long *)tmp == tmp) {; } else {
00200 tmp2=tmp;
00201 printk(KERN_EMERG "com %x ",i);
00202 do {
00203 printk(KERN_EMERG "%x %x %x %x| ",tmp2,tmp2->pcb_l_sqfl,tmp2->pcb$l_pid,tmp2->pcb$b_pri);
00204 tmp2=tmp2->pcb_l_sqfl;
00205 } while (tmp2!=tmp);
00206 printk(KERN_EMERG "\n");
00207 }
00208 }
00209 printk(KERN_EMERG "\n");
00210 }
00211
/*
 * printcom2 - like printcom() but walks the queues via the BACKWARD
 * links, starting from the tail pointers, to cross-check link
 * integrity.  Debug aid only.
 */
void printcom2(void) {
int i;
struct _pcb *tmp2;
unsigned long tmp;
printk(KERN_EMERG "cpusch %x\n",sch_gl_comqs);
for(i=16;i<32;i++) {
/* the tail pointer lives in a separate array on i386, and in the
 * second longword of the queue header otherwise */
#ifdef __i386__
tmp=&sch_aq_comt[i];
#else
tmp=&sch_aq_comh[i][1];
#endif
if(*(unsigned long *)tmp == tmp) {; } else {
tmp2=tmp;
printk(KERN_EMERG "com %x ",i);
do {
printk(KERN_EMERG "%x %x %x %x| ",tmp2,tmp2->pcb_l_sqbl,tmp2->pcb$l_pid,tmp2->pcb$b_pri);
tmp2=tmp2->pcb_l_sqbl;
} while (tmp2!=tmp);
printk(KERN_EMERG "\n");
}
}
printk(KERN_EMERG "\n");
}
00235
00236
00237
00238
00239
00240
00241
00242
00243
00244
00245
00246
/*
 * TICK_SCALE scales a nice-derived quantum by the timer frequency so
 * time slices stay roughly constant in wall time across HZ settings.
 */
#if HZ < 200
#define TICK_SCALE(x) ((x) >> 2)
#elif HZ < 400
#define TICK_SCALE(x) ((x) >> 1)
#elif HZ < 800
#define TICK_SCALE(x) (x)
#elif HZ < 1600
#define TICK_SCALE(x) ((x) << 1)
#else
#define TICK_SCALE(x) ((x) << 2)
#endif

/* Convert a nice value (-20..19) into a time-slice length in ticks. */
#define NICE_TO_TICKS(nice) (TICK_SCALE(20-(nice))+1)
00260
00261
00262
00263
00264
00265
00266
00267 struct task_struct * init_tasks[NR_CPUS] = {&init_task, };
00268
00269
00270
00271
00272
00273
00274
00275
00276
00277
00278
00279
00280 spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
00281 rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
00282
00283 struct kernel_stat kstat;
00284 extern struct task_struct *child_reaper;
00285
#ifdef CONFIG_SMP

/* Per-CPU idle task, and whether PCB @p may run on logical @cpu. */
#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)])
#define can_schedule(p,cpu) \
((p)->cpus_runnable & (p)->cpus_allowed & (1 << cpu))

#else

/* Uniprocessor: one idle task, no affinity restrictions. */
#define idle_task(cpu) (&init_task)
#define can_schedule(p,cpu) (1)

#endif
00298
00299 void scheduling_functions_start_here(void) { }
00300
00301
00302
00303
00304
00305
static FASTCALL(void reschedule_idle(struct task_struct * p));

/*
 * reschedule_idle - decide whether the newly-runnable PCB @p should
 * preempt a running task.
 *
 * The original Linux SMP heuristic (find the longest-idle CPU or the
 * best preemption target) is compiled out with "#if 0"; on SMP builds
 * this function is therefore currently a no-op.  On UP builds, the
 * current PCB is asked to reschedule if @p's priority value is >= the
 * current one (VMS-style: comparison on pcb$b_pri).
 */
static void fastcall reschedule_idle(struct task_struct * p)
{
#ifdef CONFIG_SMP
#if 0
/* --- disabled legacy Linux SMP target-selection code --- */

int this_cpu = smp_processor_id();
struct task_struct *tsk, *target_tsk;
int cpu, best_cpu, i, max_prio;
cycles_t oldest_idle;

/* fast path: wake p's last CPU if it is sitting idle */
best_cpu = p->pcb_l_cpu_id;
if (can_schedule(p, best_cpu)) {
tsk = idle_task(best_cpu);
if (cpu_curr(best_cpu) == tsk) {
int need_resched;
send_now_idle:

/* only IPI if the flag was not already set */
need_resched = tsk->need_resched;
tsk->need_resched = 1;
if ((best_cpu != this_cpu) && !need_resched)
smp_send_reschedule(best_cpu);
return;
}
}

/* otherwise pick the longest-idle CPU, or failing that the running
 * task with the best preemption goodness */
oldest_idle = (cycles_t) -1;
target_tsk = NULL;
max_prio = 0;

for (i = 0; i < smp_num_cpus; i++) {
cpu = cpu_logical_map(i);
if (!can_schedule(p, cpu))
continue;
tsk = cpu_curr(cpu);

if (tsk == idle_task(cpu)) {
if (last_schedule(cpu) < oldest_idle) {
oldest_idle = last_schedule(cpu);
target_tsk = tsk;
}
} else {
if (oldest_idle == -1ULL) {
int prio = preemption_goodness(tsk, p, cpu);

if (prio > max_prio) {
max_prio = prio;
target_tsk = tsk;
}
}
}
}
tsk = target_tsk;
if (tsk) {
if (oldest_idle != -1ULL) {
best_cpu = tsk->pcb_l_cpu_id;
goto send_now_idle;
}
tsk->need_resched = 1;
if (tsk->pcb_l_cpu_id != this_cpu)
smp_send_reschedule(tsk->pcb_l_cpu_id);
}
return;

#endif
#else
int this_cpu = smp_processor_id();
struct task_struct *tsk;

/* UP: preempt the current PCB if p is at least as urgent */
tsk = ctl_gl_pcb;
if (p->pcb_b_pri >= tsk->pcb$b_pri)
tsk->need_resched = 1;
#endif
}
00400
00401
00402
00403
00404
00405 int task_on_comqueue(struct _pcb *p) {
00406 int i,found=0;
00407 struct _pcb *tmp2;
00408 struct _pcb * tmp;
00409 for(i=0;i<32;i++) {
00410 tmp=&sch_aq_comh[i];
00411 if(*(unsigned long *)tmp == tmp) {; } else {
00412 tmp=tmp->pcb_l_sqfl;
00413 tmp2=tmp;
00414 do {
00415
00416
00417 if (tmp2 == p) found=1;
00418 tmp2=tmp2->pcb_l_sqfl;
00419 } while (tmp2!=tmp);
00420 }
00421 }
00422 return found;
00423 }
00424
00425
00426
00427
00428
00429
00430
00431
00432
/*
 * try_to_wake_up - make PCB @p computable: mark it running, insert it
 * at the tail of the COM queue for its priority, set the summary bit,
 * and possibly preempt the current task.
 *
 * @p:           the PCB to wake.
 * @synchronous: if nonzero and the waker's CPU is allowed for @p, skip
 *               the reschedule_idle() preemption check (the caller will
 *               reschedule itself shortly).
 *
 * Returns 1 if @p was inserted, 0 if it was already current or already
 * on a COM queue.  Runs under runqueue_lock with interrupts disabled.
 */
static inline int try_to_wake_up(struct task_struct * p, int synchronous)
{
unsigned long flags;
int success = 0;
unsigned long qhead;
int curpri;
int before,after;
int cpuid = smp_processor_id();
struct _cpu * cpu=smp_gl_cpu_data[cpuid];

spin_lock_irqsave(&runqueue_lock, flags);
p->state = TASK_RUNNING;
p->pcb_w_state = SCH$C_COM;
/* already the current process: nothing to queue */
if (p==ctl_gl_pcb)
goto out;
/* already computable: don't double-insert */
if (task_on_comqueue(p))
goto out;


curpri=p->pcb_b_pri;
if (mydebug4) printk("add tyr %x %x\n",p->pcb_l_pid,curpri);
/* fetch the tail pointer so insque() appends at queue end; the tail
 * lives in sch_aq_comt[] on i386, in the header's 2nd longword else */
#ifdef __i386__
qhead=*(unsigned long *)&sch_aq_comt[curpri];
#else
qhead=*(unsigned long *)&sch_aq_comh[curpri][1];
#endif
if (mydebug4) printk("eq qhead %x %x %x %x\n",
(unsigned long *)qhead,(unsigned long *)(qhead+4),
*(unsigned long *)qhead,*(unsigned long *)(qhead+4));
if (mydebug4) printk("p %x %x %x\n",qhead,*(void**)qhead,p);
if (mydebug4) printk("iq3 %x\n",sch_aq_comh[curpri]);

before=numproc();


/* debug: verify queue integrity before and after the insert */
mycheckaddr(0);
insque(p,qhead);
nr_running++;
after=numproc();
if(after-before!=1) {
printcom();
printcom2();
panic("insq1 %x %x %x %x\n",p,p->pcb_l_pid,before,after);
}

if (mydebug4) printk("p %x %x %x\n",qhead,*(void**)qhead,p);
if (mydebug4) printk("iq3 %x\n",sch_aq_comh[curpri]);
if (mydebug4) printk("eq qhead %x %x %x %x\n",
(unsigned long *)qhead,(unsigned long *)(qhead+4),
*(unsigned long *)qhead,*(unsigned long *)(qhead+4));

if (mydebug4) printk("comq1 %x %x\n",curpri,sch_gl_comqs);

/* set the computable-queue summary bit for this priority */
sch_gl_comqs=sch$gl_comqs | (1 << curpri);

mycheckaddr(0);

if (mydebug4) printk("comq1 %x %x\n",curpri,sch_gl_comqs);
if (mydebug4) {
int i;
struct _pcb *tmp2,*tmp3=qhead;
unsigned long tmp;
printk("p %x\n",p);
printk("%x %x %x %x\n",tmp3,tmp3->pcb_l_sqfl,tmp3->pcb$l_sqfl->pcb$l_sqfl,tmp3->pcb$l_sqfl->pcb$l_sqfl->pcb$l_sqfl);
for(i=0;i<32;i++) {
tmp=&sch_aq_comh[i];

if(*(unsigned long *)tmp == tmp) {; } else {
tmp2=((struct _pcb *)tmp)->pcb_l_sqfl->pcb$l_sqfl;
do {
printk("com2 %x %x %x %x\n",tmp2,tmp2->pcb_l_pid,tmp2->pcb$b_pri,i);
tmp2=tmp2->pcb_l_sqfl;
} while (tmp2!=tmp);
}
}
}
/* consider preempting someone unless this is a synchronous wakeup
 * targeted at a CPU we may run on */
if (!synchronous || !(p->cpus_allowed & (1 << smp_processor_id())))
reschedule_idle(p);
success = 1;

out:
spin_unlock_irqrestore(&runqueue_lock, flags);
return success;
}
00517
/* Wake @p non-synchronously; returns nonzero if it was made computable. */
inline int fastcall wake_up_process(struct task_struct * p)
{
return try_to_wake_up(p, 0);
}
00522
/*
 * process_timeout - timer callback used by schedule_timeout().
 * @__data: the sleeping task's pointer, passed through the timer's
 *          data field.  Wakes the task when its timer expires.
 */
static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *) __data);
}
00529
00530
00531
00532
00533
00534
00535
00536
00537
00538
00539
00540
00541
00542
00543
00544
00545
00546
00547
00548
00549
00550
00551
00552
00553
00554
00555
00556 signed long fastcall schedule_timeout(signed long timeout)
00557 {
00558 struct timer_list timer;
00559 unsigned long expire;
00560
00561 switch (timeout)
00562 {
00563 case MAX_SCHEDULE_TIMEOUT:
00564
00565
00566
00567
00568
00569
00570
00571 schedule();
00572 goto out;
00573 default:
00574
00575
00576
00577
00578
00579
00580
00581 if (timeout < 0)
00582 {
00583 printk(KERN_ERR "schedule_timeout: wrong timeout "
00584 "value %lx from %p\n", timeout,
00585 __builtin_return_address(0));
00586 current->state = TASK_RUNNING;
00587 current->pcb_w_state = SCH$C_CUR;
00588 goto out;
00589 }
00590 }
00591
00592 expire = timeout + jiffies;
00593
00594 init_timer(&timer);
00595 timer.expires = expire;
00596 timer.data = (unsigned long) current;
00597 timer.function = process_timeout;
00598
00599 add_timer(&timer);
00600 schedule();
00601 del_timer_sync(&timer);
00602
00603 timeout = expire - jiffies;
00604
00605 out:
00606 return timeout < 0 ? 0 : timeout;
00607 }
00608
00609
00610
00611
00612 asmlinkage void sch_sched(int);
00613
00614 extern int fix_init_thread;
00615
00616 int countme=500;
00617
00618
00619
00620
00621
/*
 * sch_resched - the IPL$_RESCHED software-interrupt service routine.
 *
 * Takes the SCHED spinlock at IPL$_SCHED, removes the current PCB from
 * the active-priority bookkeeping, requeues it at the tail of the COM
 * queue for its priority (unless it is the idle task), then falls into
 * the main scheduler via sch_sched(1) - which inherits the locks taken
 * here (hence the argument 1 = "locks already held").
 */
asmlinkage void sch_resched(void) {
int cpuid = smp_processor_id();
struct _cpu * cpu=smp_gl_cpu_data[cpuid];
struct _pcb * curpcb;
unsigned long curpri;
unsigned long qhead;
int before,after;






#ifdef __x86_64__
/* honour software-interrupt blocking on x86_64 */
if (intr_blocked(IPL__RESCHED))
return;
regtrap(REG_INTR,IPL__RESCHED);
#endif


/* raise to scheduling IPL and take the VMS SCHED spinlock */
setipl(IPL__SCHED);
vmslock(&SPIN_SCHED,-1);

spin_lock_irq(&runqueue_lock);



curpcb=cpu->cpu_l_curpcb;

release_kernel_lock(curpcb, cpuid);

curpri=cpu->cpu_b_cur_pri;


/* this CPU no longer runs at curpri */

sch_al_cpu_priority[curpri]=sch$al_cpu_priority[curpri] & (~ cpu->cpu$l_cpuid_mask );


/* clear the active-priority summary bit if no CPU runs at curpri
 * (bit 31-curpri: VMS keeps priority 0 in the high bit) */

if (!sch_al_cpu_priority[curpri])
sch_gl_active_priority=sch$gl_active_priority & (~ (1 << (31-curpri)));


/* the idle task is never placed on a COM queue */

if (curpcb == idle_task(curpcb->pcb_l_cpu_id))
goto out;

/* a pending signal makes an interruptible sleeper runnable again */
if (curpcb->state==TASK_INTERRUPTIBLE)
if (signal_pending(curpcb)) {
curpcb->state = TASK_RUNNING;
curpcb->pcb_w_state = SCH$C_CUR;
}

#if 0
if (curpcb->state!=TASK_RUNNING) {
curpcb->pcb_w_state=SCH$C_LEF;
}
#endif

#if 0
if (curpcb->state==TASK_RUNNING) {
#endif
#ifdef DEBUG_SCHED
before=numproc();


/* debug: queue integrity before requeueing the current PCB */
mycheckaddr(0);

#endif


/* mark curpri computable again - curpcb is about to be queued there */

sch_gl_comqs=sch$gl_comqs | (1 << curpri);




curpcb->pcb_w_state=SCH$C_COM;


/* insert at the tail of the COM queue for curpri */

#ifdef __i386__
qhead=*(unsigned long *)&sch_aq_comt[curpri];
#else
qhead=*(unsigned long *)&sch_aq_comh[curpri][1];
#endif
if (!task_on_comqueue(curpcb)) {
if (curpcb==qhead) panic(" a panic\n");
insque(curpcb,qhead);
} else {
panic("something\n");
}
#ifdef DEBUG_SCHED
/* 42: tell the checker to skip the current-PCB-on-queue tests */
mycheckaddr(42);
#endif

nr_running++;
#ifdef DEBUG_SCHED
after=numproc();
if(after-before!=1) {

printcom();
panic("insq2 %x %x\n",before,after);
}
#endif

out:

sch_gl_idle_cpus=0;
#if 0
}
#endif

/* enter the scheduler proper; 1 = SCHED lock and runqueue_lock held */
sch_sched(1);
}
00738
00739
00740
00741
00742
00743
00744
00745
00746
00747
00748
00749
00750
00751
00752 int mydebug=0;
00753 int mydebug2=0;
00754 int mycount=0;
00755
00756
00757
00758
/*
 * schedule - Linux-compatible entry point; requests a reschedule by
 * raising the RESCHED software interrupt (delivery runs sch_resched).
 */
asmlinkage void schedule(void) {
SOFTINT_RESCHED_VECTOR;


}
00764
/*
 * sch_sched - the core VMS-style scheduler (SCH$SCHED equivalent).
 *
 * @from_sch$resched: 1 when entered from sch_resched() with IPL raised,
 *   SPIN_SCHED and runqueue_lock already held (jumps straight to
 *   skip_lock); 0 when those must be validated/taken here.
 *
 * Selects the highest-priority computable PCB (lowest numeric index in
 * sch_gl_comqs via ffs), honours capability/affinity constraints, then
 * context-switches to it.  Falls back to the idle task when no queue
 * has work.
 *
 * NOTE(review): the parameter appears as from_sch$resched in the
 * signature but from_sch_resched in the body - presumably the same
 * '$' identifier before name mangling; confirm against the repository.
 */
asmlinkage void sch_sched(int from_sch$resched) {
int cpuid = smp_processor_id();
struct _cpu * cpu=smp_gl_cpu_data[cpuid];
struct _pcb *next = 0, *curpcb;
int curpri, affinity;
unsigned char tmppri;
unsigned long qhead = 0;
int after, before;

curpcb=cpu->cpu_l_curpcb;
curpri=cpu->cpu_b_cur_pri;


/* caller (sch_resched) already holds the locks and did the
 * active-priority bookkeeping */

if (from_sch_resched == 1)
goto skip_lock;

#if 0


/* disabled: software-interrupt gating when entered as an ISR */

if (intr_blocked(IPL__SCHED))
return;

regtrap(REG_INTR,IPL__SCHED);
#endif

/* sanity: must already be at IPL 8 holding the SCHED spinlock */
int ipl = getipl();
if (ipl != 8 || SPIN_SCHED.spl_l_spinlock == 0)
panic("schsch\n");

#if 0


setipl(IPL__SCHED);
vmslock(&SPIN_SCHED,-1);
#endif


/* drop this CPU from the set running at curpri */

sch_al_cpu_priority[curpri]=sch$al_cpu_priority[curpri] & (~ cpu->cpu$l_cpuid_mask );


/* some other CPU still runs at curpri: keep the summary bit */

if (sch_al_cpu_priority[curpri])
goto skip_lock;


/* last CPU at curpri: clear the active-priority summary bit */

sch_gl_active_priority=sch$gl_active_priority & (~ (1 << (31-curpri)));






spin_lock_prefetch(&runqueue_lock);

if (!curpcb->active_mm) BUG();

release_kernel_lock(curpcb, cpuid);

spin_lock_irq(&runqueue_lock);

skip_lock:


/* affinity=1 once we have parked a PCB that preferred another CPU */

affinity=0;
struct _pcb * aff_next = 0;


/* highest-priority non-empty COM queue (ffs: bit 0 = priority 0) */

tmppri=ffs(sch_gl_comqs);
#ifdef DEBUG_SCHED
if (mydebug5)
printk("ffs %x %x\n",tmppri,sch_gl_comqs);
#endif

if (!tmppri) {

#if 0

goto sch_idle;
#endif
go_idle:

/* nothing computable: advertise this CPU as idle and run the idle task */
sch_gl_idle_cpus=sch$gl_idle_cpus | (cpu->cpu$l_cpuid_mask);


next=idle_task(cpuid);
goto skip_cap;
} else {

/* ffs result is 1-based; convert to a queue index */
tmppri--;
gethead:
qhead=*(unsigned long *)&sch_aq_comh[tmppri];
#ifdef DEBUG_SCHED
if (mydebug4) printk("eq qhead %x %x %x %x\n",
(unsigned long *)qhead,(unsigned long *)(qhead+4),
*(unsigned long *)qhead,*(unsigned long *)(qhead+4));

if (mydebug) printcom();

if (mydebug4) printk("next %x %x %x %x\n",qhead,*(void**)qhead,next,sch_aq_comh[tmppri]);
before=numproc();
#endif
#ifdef DEBUG_SCHED
mycheckaddr(42);
#endif


/* take the PCB at the head of the chosen queue */
next=(struct _pcb *) remque(qhead,next);


/* relink its queue element to itself (detached state) */
qhead_init(next);


nr_running--;
#ifdef DEBUG_SCHED
after=numproc();
if(before-after!=1) {
int i;
for (i=0;i<2000000000;i++) ;
panic("remq1 %x %x\n",before,after);
}
if (mydebug4) printk("next %x %x %x %x\n",qhead,*(void**)qhead,next,sch_aq_comh[tmppri]);
if (mydebug4) printk("comh %x %x\n",sch_aq_comh[tmppri],((struct _pcb *) sch$aq_comh[tmppri])->pcb$l_sqfl);
#endif


/* queue drained: clear its summary bit */

if (sch_aq_comh[tmppri]==&sch$aq_comh[tmppri])
sch_gl_comqs=sch$gl_comqs & (~(1 << tmppri));
#ifdef DEBUG_SCHED
mycheckaddr(next);
#endif


if (mydebug5) printk("comq3 %x %x %x\n",tmppri,sch_gl_comqs,(~(1 << tmppri)));

}

/* debug: a NULL candidate should be impossible; stall for inspection */
if(next==0) { int j; printk("qel0\n"); for(j=0;j<1000000000;j++) ; }



if (mydebug4) printk("eq qhead %x %x %x %x\n",
(unsigned long *)qhead,(unsigned long *)(qhead+4),
*(unsigned long *)qhead,*(unsigned long *)(qhead+4));

check_pcb_ok:
#if 0
if (next->pcb_b_type != DYN$C_PCB)
panic("DYN_C_PCB\n");
#endif

/* capability check: can this CPU satisfy everything next requires? */
if ((next->pcb_l_capability & cpu->cpu$l_capability) != next->pcb$l_capability) {
if (next->pcb_l_capability & CPB$M_IMPLICIT_AFFINITY) {
/* soft (implicit) affinity: prefer next's last CPU, but steal it
 * here after pcb$b_affinity_skip refusals */
if (next->pcb_l_affinity == cpu->cpu$l_phy_cpuid)
goto skip_cap;
next->pcb_b_affinity_skip--;
if (next->pcb_b_affinity_skip == 0) {
next->pcb_b_affinity_skip = sch$gl_affinity_skip;
next->pcb_l_affinity = cpu->cpu$l_phy_cpuid;

goto skip_cap;
}
/* remember the first skipped-for-affinity PCB as a fallback */
if (!affinity) {
affinity = 1;
aff_next = next;
}
/* put next back at the queue tail and try the next element */
insque(next, ((struct _pcb *)qhead)->pcb_l_sqbl);
sch_gl_comqs |= 1 << tmppri;
if (next != ((struct _pcb *)qhead)->pcb_l_sqfl) {
next=(struct _pcb *) remque(qhead,next);
printk("go check_pcb_ok\n");
goto check_pcb_ok;
}
nextqueue:

/* advance to the next non-empty queue */
for(tmppri++; tmppri<32; tmppri++) {
if (sch_aq_comh[tmppri]!=&sch$aq_comh[tmppri])
break;
}
if (tmppri == 32) {
if (affinity == 0)
goto go_idle;
} else {
if (affinity == 0)
goto gethead;
}

/* prefer higher-priority work over the affinity fallback */
if (tmppri<=aff_next->pcb_b_pri) {
printk("gethead\n");
goto gethead;
}
/* run the affinity fallback after all */
next = aff_next->pcb_l_sqfl;
remque(aff_next, 0);
if (aqempty(aff_next))
sch_gl_comqs=sch$gl_comqs & (~(1 << next->pcb$b_pri));
next->pcb_b_affinity_skip = sch$gl_affinity_skip;
next->pcb_l_affinity = cpu->cpu$l_phy_cpuid;

} else {
/* hard capability mismatch: recompute affinity if the global
 * capability configuration changed since last time */
if (sch_gl_capability_sequence != next->pcb$l_capability_seq)
next->pcb_l_current_affinity = sch$calculate_affinity(next->pcb$l_affinity);
if ((next->pcb_l_current_affinity & (1 << next->pcb$l_cpu_id)) == 0) {
/* NOTE(review): the panic above makes the MWAIT path below
 * unreachable as written */
panic("rwait for capability\n");
sch_gl_resmask |= RSN$_CPUCAP;
next->pcb_l_efwm = RSN$_CPUCAP;
next->pcb_w_state = SCH$C_MWAIT;

struct _pcb * pcb = next->pcb_l_sqfl;
insque(next,sch_gq_mwait);
((struct _wqh *)sch_gq_mwait)->wqh$l_wqcnt++;
if (aqempty(pcb))
goto nextqueue;
next = pcb;
remque(next, 0);
goto check_pcb_ok;
}
}
}
skip_cap:

/* ---- commit: make next the current PCB on this CPU ---- */
cpu->cpu_l_curpcb=next;
cpu->cpu_b_ipl=next->psl_ipl;


next->state=TASK_RUNNING;
next->pcb_w_state = SCH$C_CUR;


next->pcb_l_cpu_id=cpu->cpu$l_phy_cpuid;


/* priority decays back toward the base priority one step per pick */
if (next->pcb_b_pri<next->pcb$b_prib) next->pcb$b_pri++;


cpu->cpu_b_cur_pri=next->pcb$b_pri;


/* this CPU is no longer idle */
sch_gl_idle_cpus=sch$gl_idle_cpus & (~ cpu->cpu$l_cpuid_mask);


/* record this CPU as running at next's priority */
sch_al_cpu_priority[cpu->cpu$b_cur_pri]=sch$al_cpu_priority[cpu->cpu$b_cur_pri] | (cpu->cpu$l_cpuid_mask);


sch_gl_active_priority=sch$gl_active_priority | (1 << (31-cpu->cpu$b_cur_pri));

#ifdef DEBUG_SCHED
if (mydebug5) {
printk("pri %x %x %x %x %x %x\n",curpcb,curpcb->pcb_l_pid,curpcb->pcb$b_pri,next,next->pcb$l_pid,next->pcb$b_pri);
printk("cpusch %x %x\n",cpu->cpu_b_cur_pri,sch$gl_comqs);
printcom();
}
#endif

#if 0
curpcb->need_resched = 0;
#endif

#ifdef DEBUG_SCHED
if (mydebug4) { int i; for(i=0;i<100000000;i++) ; }
#endif

/* picked ourselves again: just drop the locks, no context switch */
if (next == curpcb) {

spin_unlock_irq(&runqueue_lock);
vmsunlock(&SPIN_SCHED,-1);
reacquire_kernel_lock(curpcb);

goto return_a_reimac;
return;
}


next->pcb_l_cpu_id = curpcb->pcb$l_cpu_id;


spin_unlock_irq(&runqueue_lock);
vmsunlock(&SPIN_SCHED,-1);


/* debug: a "PCB" pointing into the queue-header array is corruption */

if (mydebug6)
if (next>=&sch_aq_comh[0] && next<=&sch$aq_comh[33]) {
panic("ga!\n");
printk("ga!\n");
{ int j; for(j=0;j<1000000000;j++) ; }
}


kstat.context_swtch++;











/* standard Linux lazy-TLB mm handover, then the arch context switch */
prepare_to_switch();
{
struct mm_struct *mm = next->mm;
struct mm_struct *oldmm = curpcb->active_mm;
if (!mm) {
/* kernel thread: borrow the outgoing task's address space */
if (next->active_mm) { printk("bu %x %x %x\n",next,next->pcb_l_pid,next->pcb$b_pri); { int j; for(j=0;j<1000000000;j++) ; }; BUG(); }
next->active_mm = oldmm;
atomic_inc(&oldmm->mm_count);
enter_lazy_tlb(oldmm, next, cpuid);
} else {
if (next->active_mm != mm) BUG();
switch_mm(oldmm, mm, next, cpuid);
}

if (!curpcb->mm) {
curpcb->active_mm = NULL;
mmdrop(oldmm);
}
}






#ifdef DEBUG_SCHED
if (mydebug4) printk("bef swto\n");

#endif


/* propagate pending-AST level into the processor AST register field */
next->pr_astlvl=next->phd_b_astlvl;
if (from_sch_resched==0) {

}
#ifdef __i386__
switch_to(curpcb, next, curpcb);
#else
long tmp1 = 0;
long tmp2 = __pa(next->mm->pgd);
switch_to(curpcb, next, curpcb, tmp1, tmp2);
#endif




goto return_a_reimac;
return;
qempty:
panic("qempty");
return;
sch_idle:
/* (currently unreferenced) spin-idle loop: advertise idle, wait for
 * another CPU to clear our idle bit, then rescan the queues */
sch_gl_idle_cpus=sch$gl_idle_cpus | (cpu->cpu$l_cpuid_mask);

cpu->cpu_l_curpcb = idle_task(cpuid);
cpu->cpu_b_cur_pri=-1;
vmsunlock(&SPIN_SCHED,IPL__RESCHED);
for (; sch_gl_idle_cpus & (1<<cpuid); ) nop();

vmslock(&SPIN_SCHED,IPL__SCHED);
#if 0
cpu->cpu_q_sched_flags |= CPU$M_SCHED;
#endif
goto skip_lock;
return;
return_a_reimac:
if (from_sch_resched==0) {

}
}
01143
01144
01145
01146
01147
01148
01149
01150
01151
01152
/*
 * __wake_up_common - wake up tasks on wait queue @q whose state matches
 * @mode, stopping after @nr_exclusive exclusive wakeups.
 * @sync is passed through to try_to_wake_up().
 *
 * Caller must hold the wait-queue lock.  NOTE(review): the body is
 * compiled only on i386; on other architectures this is a no-op.
 */
static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, const int sync)
{
struct list_head *tmp;
struct task_struct *p;

CHECK_MAGIC_WQHEAD(q);
WQ_CHECK_LIST_HEAD(&q->task_list);

#ifdef __i386__
list_for_each(tmp,&q->task_list) {
unsigned int state;
wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);

CHECK_MAGIC(curr->__magic);
p = curr->task;
state = p->state;
if (state & mode) {
WQ_NOTE_WAKER(curr);
/* exclusive waiters stop the scan once nr_exclusive are woken */
if (try_to_wake_up(p, sync) && (curr->flags&WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
break;
}
}
#endif
}
01178
01179 void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode, int nr)
01180 {
01181 #ifndef CONFIG_VMS
01182 if (q) {
01183 unsigned long flags;
01184 wq_read_lock_irqsave(&q->lock, flags);
01185 __wake_up_common(q, mode, nr, 0);
01186 wq_read_unlock_irqrestore(&q->lock, flags);
01187 }
01188 #endif
01189 }
01190
01191 void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)
01192 {
01193 #ifndef CONFIG_VMS
01194 if (q) {
01195 unsigned long flags;
01196 wq_read_lock_irqsave(&q->lock, flags);
01197 __wake_up_common(q, mode, nr, 1);
01198 wq_read_unlock_irqrestore(&q->lock, flags);
01199 }
01200 #endif
01201 }
01202
/*
 * complete - signal completion @x: bump the done count and wake one
 * exclusive waiter (interruptible or not) under the completion's lock.
 */
void fastcall complete(struct completion *x)
{
unsigned long flags;

spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
spin_unlock_irqrestore(&x->wait.lock, flags);
}
01212
/*
 * wait_for_completion - block uninterruptibly until complete() is
 * called on @x, then consume one "done" token.
 *
 * Waits as an exclusive waiter so each complete() releases exactly one
 * waiter.  NOTE(review): entire body is compiled out under CONFIG_VMS,
 * making this a no-op there.
 */
void fastcall wait_for_completion(struct completion *x)
{
#ifndef CONFIG_VMS
spin_lock_irq(&x->wait.lock);
if (!x->done) {
DECLARE_WAITQUEUE(wait, current);

wait.flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue_tail(&x->wait, &wait);
do {
/* re-arm the sleep state before every schedule(); drops the
 * lock across the sleep to let complete() in */
__set_current_state(TASK_UNINTERRUPTIBLE);
current->pcb_w_state = 0;
spin_unlock_irq(&x->wait.lock);
schedule();
spin_lock_irq(&x->wait.lock);
} while (!x->done);
__remove_wait_queue(&x->wait, &wait);
}
x->done--;
spin_unlock_irq(&x->wait.lock);
#endif
}
01235
/* Declare the locals every sleep_on variant needs and build a
 * wait-queue entry for the current task. */
#define SLEEP_ON_VAR \
unsigned long flags; \
wait_queue_t wait; \
init_waitqueue_entry(&wait, current);

/* Link the entry onto @q under the queue lock (irq-saving). */
#define SLEEP_ON_HEAD \
wq_write_lock_irqsave(&q->lock,flags); \
__add_wait_queue(q, &wait); \
wq_write_unlock(&q->lock);

/* Unlink the entry after waking, restoring the saved irq state. */
#define SLEEP_ON_TAIL \
wq_write_lock_irq(&q->lock); \
__remove_wait_queue(q, &wait); \
wq_write_unlock_irqrestore(&q->lock,flags);
01250
/* Sleep interruptibly on @q until woken (signal or explicit wakeup). */
void fastcall interruptible_sleep_on(wait_queue_head_t *q)
{
SLEEP_ON_VAR

current->state = TASK_INTERRUPTIBLE;
current->pcb_w_state = 0;

SLEEP_ON_HEAD
schedule();
SLEEP_ON_TAIL
}
01262
/*
 * Sleep interruptibly on @q for at most @timeout jiffies.
 * Returns the jiffies remaining (0 if the full interval elapsed).
 */
long fastcall interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
SLEEP_ON_VAR

current->state = TASK_INTERRUPTIBLE;
current->pcb_w_state = 0;

SLEEP_ON_HEAD
timeout = schedule_timeout(timeout);
SLEEP_ON_TAIL

return timeout;
}
01276
/* Sleep uninterruptibly on @q until an explicit wakeup. */
void fastcall sleep_on(wait_queue_head_t *q)
{
SLEEP_ON_VAR

current->state = TASK_UNINTERRUPTIBLE;
current->pcb_w_state = 0;

SLEEP_ON_HEAD
schedule();
SLEEP_ON_TAIL
}
01288
/*
 * Sleep uninterruptibly on @q for at most @timeout jiffies.
 * Returns the jiffies remaining (0 if the full interval elapsed).
 */
long fastcall sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
SLEEP_ON_VAR

current->state = TASK_UNINTERRUPTIBLE;
current->pcb_w_state = 0;

SLEEP_ON_HEAD
timeout = schedule_timeout(timeout);
SLEEP_ON_TAIL

return timeout;
}
01302
01303 void scheduling_functions_end_here(void) { }
01304
01305 #ifndef __alpha__
01306
01307
01308
01309
01310
01311
01312
01313 asmlinkage long sys_nice(int increment)
01314 {
01315 long newprio;
01316
01317
01318
01319
01320
01321
01322 if (increment < 0) {
01323 if (!capable(CAP_SYS_NICE))
01324 return -EPERM;
01325 if (increment < -40)
01326 increment = -40;
01327 }
01328 if (increment > 40)
01329 increment = 40;
01330
01331 newprio = current->pcb_b_prib + increment;
01332 if (newprio < -20)
01333 newprio = -20;
01334 if (newprio > 19)
01335 newprio = 19;
01336 current->pcb_b_prib = newprio;
01337 return 0;
01338 }
01339
01340 #endif
01341
/*
 * find_process_by_pid - look up a task by internal pid.
 * @pid: internal pid, or 0 meaning the calling task.
 *
 * Returns the task, or 0 after logging a diagnostic when the lookup
 * fails.  Caller is expected to hold tasklist_lock.
 */
inline struct task_struct *find_process_by_pid(pid_t pid)
{
struct task_struct *tsk = current;

if (pid)
tsk = find_task_by_pid(pid);
if (tsk)
return tsk;
/* NOTE(review): x points at one pointer-sized local; reading
 * x[1]..x[8] walks past it into adjacent stack memory (undefined
 * behavior) - presumably a quick-and-dirty stack dump for debugging.
 * Consider removing or replacing with a proper backtrace helper. */
long *x=&tsk;
printk(KERN_EMERG "FIND %d %x %x %x %x %x %x %x %x %x %x\n",pid,pid,x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8]);
return 0;
}
01354
/*
 * setscheduler - common worker for the sched_setscheduler/sched_setparam
 * system calls.
 *
 * @pid:    internal pid (0 = current task).
 * @policy: new policy, or -1 to keep the task's current policy.
 * @param:  user-space sched_param holding the requested priority.
 *
 * Validates the policy/priority combination and permissions, then
 * stores the policy in the PCB.  NOTE(review): unlike stock Linux, the
 * requested priority itself is validated but never stored - presumably
 * deferred to the VMS priority model; confirm intent.
 *
 * Returns 0 or -EINVAL/-EFAULT/-ESRCH/-EPERM.
 */
static int setscheduler(pid_t pid, int policy,
struct sched_param *param)
{
struct sched_param lp;
struct task_struct *p;
int retval;

retval = -EINVAL;
if (!param || pid < 0)
goto out_nounlock;

retval = -EFAULT;
if (copy_from_user(&lp, param, sizeof(struct sched_param)))
goto out_nounlock;


/* lock order: tasklist_lock (read, irq off) then runqueue_lock */

read_lock_irq(&tasklist_lock);
spin_lock(&runqueue_lock);

p = find_process_by_pid(pid);

retval = -ESRCH;
if (!p)
goto out_unlock;

if (policy < 0)
policy = p->pcb_l_sched_policy;
else {
retval = -EINVAL;
if (policy != PCB_K_SCHED_FIFO && policy != PCB$K_SCHED_RR &&
policy != PCB_K_SCHED_OTHER)
goto out_unlock;
}


/* valid priorities: 1..99 for FIFO/RR, exactly 0 for OTHER */

retval = -EINVAL;
if (lp.sched_priority < 0 || lp.sched_priority > 99)
goto out_unlock;
if ((policy == PCB_K_SCHED_OTHER) != (lp.sched_priority == 0))
goto out_unlock;

/* realtime policies and cross-uid changes require CAP_SYS_NICE */
retval = -EPERM;
if ((policy == PCB_K_SCHED_FIFO || policy == PCB$K_SCHED_RR) &&
!capable(CAP_SYS_NICE))
goto out_unlock;
if ((current->euid != p->euid) && (current->euid != p->uid) &&
!capable(CAP_SYS_NICE))
goto out_unlock;

retval = 0;
p->pcb_l_sched_policy = policy;

#if 0
current->need_resched = 1;
#endif

out_unlock:
spin_unlock(&runqueue_lock);
read_unlock_irq(&tasklist_lock);

out_nounlock:
return retval;
}
01426
01427 asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
01428 struct sched_param *param)
01429 {
01430 if (pid>0)
01431 pid = exe_epid_to_ipid(pid);
01432 return setscheduler(pid, policy, param);
01433 }
01434
01435 asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param *param)
01436 {
01437 if (pid>0)
01438 pid = exe_epid_to_ipid(pid);
01439 return setscheduler(pid, -1, param);
01440 }
01441
/*
 * sys_sched_getscheduler - syscall: return the scheduling policy of
 * @pid (EPID form; 0 = current), with the transient SCHED_YIELD flag
 * masked off.  Returns the policy, -EINVAL, or -ESRCH.
 */
asmlinkage long sys_sched_getscheduler(pid_t pid)
{
struct task_struct *p;
int retval;

if (pid>0)
pid = exe_epid_to_ipid(pid);

retval = -EINVAL;
if (pid < 0)
goto out_nounlock;

retval = -ESRCH;
read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
if (p)
retval = p->pcb_l_sched_policy & ~SCHED_YIELD;
read_unlock(&tasklist_lock);

out_nounlock:
return retval;
}
01464
01465 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param)
01466 {
01467 struct task_struct *p;
01468 struct sched_param lp;
01469 int retval;
01470
01471 if (pid>0)
01472 pid = exe_epid_to_ipid(pid);
01473
01474 retval = -EINVAL;
01475 if (!param || pid < 0)
01476 goto out_nounlock;
01477
01478 read_lock(&tasklist_lock);
01479 p = find_process_by_pid(pid);
01480 retval = -ESRCH;
01481 if (!p)
01482 goto out_unlock;
01483
01484 read_unlock(&tasklist_lock);
01485
01486
01487
01488
01489 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
01490
01491 out_nounlock:
01492 return retval;
01493
01494 out_unlock:
01495 read_unlock(&tasklist_lock);
01496 return retval;
01497 }
01498
/*
 * sys_sched_yield -- yield the CPU to other runnable tasks.
 *
 * In this VMS-flavoured port the actual requeueing logic has been
 * stripped out (see the #if 0 block and the empty runqueue_lock
 * critical section below), so the call is effectively a no-op apart
 * from taking and releasing the runqueue lock.  Always returns 0, as
 * the Linux ABI requires.
 */
asmlinkage long sys_sched_yield(void)
{
	/*
	 * Estimate whether any other task could use the CPU; only
	 * enter the (gutted) yield path when one might.
	 */
	int nr_pending = nr_running;

#if CONFIG_SMP
#if 0
	/* Disabled: discount CPUs already running a non-idle task. */
	int i;

	for (i = 0; i < smp_num_cpus; i++) {
		int cpu = cpu_logical_map(i);
		if (aligned_data[cpu].schedule_data.curr != idle_task(cpu))
			nr_pending--;
	}
#endif
#else
	/* UP: this process itself is always running; discount it. */
	nr_pending--;
#endif
	if (nr_pending) {
#if 0
		current->need_resched = 1;
#endif

		/* The move-to-tail logic has been removed; the empty
		 * lock/unlock pair remains -- presumably kept for its
		 * barrier effect.  TODO confirm it is still needed. */
		spin_lock_irq(&runqueue_lock);

		spin_unlock_irq(&runqueue_lock);
	}
	return 0;
}
01544
01545 asmlinkage long sys_sched_get_priority_max(int policy)
01546 {
01547 int ret = -EINVAL;
01548
01549 switch (policy) {
01550 case PCB_K_SCHED_FIFO:
01551 case PCB_K_SCHED_RR:
01552 ret = 99;
01553 break;
01554 case PCB_K_SCHED_OTHER:
01555 ret = 0;
01556 break;
01557 }
01558 return ret;
01559 }
01560
01561 asmlinkage long sys_sched_get_priority_min(int policy)
01562 {
01563 int ret = -EINVAL;
01564
01565 switch (policy) {
01566 case PCB_K_SCHED_FIFO:
01567 case PCB_K_SCHED_RR:
01568 ret = 1;
01569 break;
01570 case PCB_K_SCHED_OTHER:
01571 ret = 0;
01572 }
01573 return ret;
01574 }
01575
01576 asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
01577 {
01578 struct timespec t;
01579 struct task_struct *p;
01580 int retval = -EINVAL;
01581
01582 if (pid>0)
01583 pid = exe_epid_to_ipid(pid);
01584
01585 if (pid < 0)
01586 goto out_nounlock;
01587
01588 retval = -ESRCH;
01589 read_lock(&tasklist_lock);
01590 p = find_process_by_pid(pid);
01591 if (p)
01592 jiffies_to_timespec(p->pcb_l_sched_policy & PCB$K_SCHED_FIFO ? 0 : NICE_TO_TICKS(p->pcb$b_prib), &t);
01593 read_unlock(&tasklist_lock);
01594 if (p)
01595 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
01596 out_nounlock:
01597 return retval;
01598 out_unlock:
01599 read_unlock(&tasklist_lock);
01600 return retval;
01601 }
01602
/*
 * show_task -- dump one task's state for debugging (called from
 * show_state(), e.g. on SysRq-T).  Prints name, run-state letter,
 * saved PC, an estimate of unused kernel stack, pid linkage, and
 * finally the kernel backtrace.
 */
static void show_task(struct task_struct * p)
{
	unsigned long free = 0;
	int state;
	/* One letter per task state: Running, Sleeping, Disk wait,
	 * Zombie, sTopped, sWapping. */
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk(KERN_EMERG "%-13.13s ", p->pcb_t_lname);
	/* state 0 is "running"; otherwise 1 + index of lowest set state bit. */
	state = p->state ? ffz(~p->state) + 1 : 0;
	if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
		/* Non-literal format is safe: table holds fixed literals. */
		printk(stat_nam[state]);
	else
		printk(KERN_EMERG " ");
#if (BITS_PER_LONG == 32)
	if (p == current)
		printk(KERN_EMERG " current ");
	else
		printk(KERN_EMERG " %08lX ", thread_saved_pc(&p->thread));
#else
	if (p == current)
		printk(KERN_EMERG " current task ");
	else
		printk(KERN_EMERG " %016lx ", thread_saved_pc(&p->thread));
#endif
	{
		/* Scan upward from the end of the task struct for the
		 * first clobbered (non-zero) word; the distance is a
		 * rough estimate of never-touched stack space. */
		unsigned long * n = (unsigned long *) (p+1);
		while (!*n)
			n++;
		free = (unsigned long) n - (unsigned long)(p+1);
	}
	printk(KERN_EMERG "%5lu %5d %6d ", free, p->pcb_l_pid, p->p_pptr->pcb$l_pid);
	if (p->p_cptr)
		printk(KERN_EMERG "%5d ", p->p_cptr->pcb_l_pid);
	else
		printk(KERN_EMERG " ");
	/* NOTE(review): labels look swapped relative to mainline Linux
	 * (there !p->mm prints "(L-TLB)" for lazy-TLB kernel threads);
	 * here the same test is kept -- confirm intent. */
	if (!p->mm)
		printk(KERN_EMERG " (L-TLB)\n");
	else
		printk(KERN_EMERG " (NOTLB)\n");

	{
		extern void show_trace_task(struct task_struct *tsk);
		show_trace_task(p);
	}
}
01647
/*
 * render_sigset_t -- render a signal set as a hex string, highest
 * signals first, four signals per hex digit (signal 1 is the least
 * significant bit of the last digit).  Writes a trailing NUL and
 * returns a pointer to that terminator.
 */
char * render_sigset_t(sigset_t *set, char *buffer)
{
	int bit;

	for (bit = _NSIG - 4; bit >= 0; bit -= 4) {
		int nibble = 0;

		if (sigismember(set, bit + 1))
			nibble |= 1;
		if (sigismember(set, bit + 2))
			nibble |= 2;
		if (sigismember(set, bit + 3))
			nibble |= 4;
		if (sigismember(set, bit + 4))
			nibble |= 8;

		*buffer++ = (nibble < 10 ? '0' : 'a' - 10) + nibble;
	}
	*buffer = 0;
	return buffer;
}
01662
/*
 * show_state -- dump every task in the system via show_task().
 * Used for SysRq-T style debugging output.
 */
void show_state(void)
{
	struct task_struct *p;

#if (BITS_PER_LONG == 32)
	printk(KERN_EMERG "\n"
	" free sibling\n");
	printk(KERN_EMERG " task PC stack pid father child younger older\n");
#else
	printk(KERN_EMERG "\n"
	" free sibling\n");
	printk(KERN_EMERG " task PC stack pid father child younger older\n");
#endif
	read_lock(&tasklist_lock);
	/* for_each_task_pre1/post1 are this port's paired task-walk
	 * macros -- presumably iterating the PCB vector; the post1
	 * call closes whatever scope pre1 opens.  TODO confirm. */
	for_each_task_pre1(p) {
		/* Dumping all tasks can take long enough to trip the
		 * NMI watchdog; pet it each iteration. */
		touch_nmi_watchdog();
		show_task(p);
	}
	for_each_task_post1(p);
	read_unlock(&tasklist_lock);
}
01688
01689
01690
01691
01692
01693
01694
01695
01696
01697
01698
01699
01700
/*
 * reparent_to_init -- hand the calling task over to init.
 *
 * Used by kernel threads that were forked from a user process:
 * relinks the task under the child reaper, resets scheduling to the
 * default policy/priority, and restores initial capabilities and
 * resource limits.  Takes tasklist_lock (write) and nests
 * runqueue_lock inside it.
 */
void reparent_to_init(void)
{
	struct task_struct *this_task = current;

	write_lock_irq(&tasklist_lock);

	/* Unlink from the current parent and relink under init. */
	REMOVE_LINKS(this_task);
	this_task->p_pptr = child_reaper;
	this_task->p_opptr = child_reaper;
	SET_LINKS(this_task);

	/* Make sure init sees a normal child-exit notification. */
	this_task->exit_signal = SIGCHLD;

	spin_lock(&runqueue_lock);

	this_task->ptrace = 0;
	/* Back to the default base priority and timeshare policy. */
	this_task->pcb_b_prib = DEFPRI;
	this_task->pcb_l_sched_policy = PCB$K_SCHED_OTHER;

	/* Regain init's capability sets and resource limits. */
	this_task->cap_effective = CAP_INIT_EFF_SET;
	this_task->cap_inheritable = CAP_INIT_INH_SET;
	this_task->cap_permitted = CAP_FULL_SET;
	this_task->keep_capabilities = 0;
	memcpy(this_task->rlim, init_task.rlim, sizeof(*(this_task->rlim)));
	this_task->user = INIT_USER;

	spin_unlock(&runqueue_lock);
	write_unlock_irq(&tasklist_lock);
}
01736
01737
01738
01739
01740
01741
01742 void daemonize(void)
01743 {
01744 struct fs_struct *fs;
01745
01746
01747
01748
01749
01750
01751
01752 exit_mm(current);
01753
01754 current->session = 1;
01755 current->pgrp = 1;
01756 current->tty = NULL;
01757
01758
01759
01760 exit_fs(current);
01761 fs = init_task.fs;
01762 current->fs = fs;
01763 atomic_inc(&fs->count);
01764 exit_files(current);
01765 current->files = init_task.files;
01766 atomic_inc(¤t->files->count);
01767 }
01768
01769 extern unsigned long wait_init_idle;
01770
01771 extern struct _phd system_phd;
01772
/*
 * init_idle -- final per-CPU idle task setup.
 *
 * Clears this CPU's bit in wait_init_idle (so the boot CPU knows the
 * secondary reached idle) and initializes the current PCB's PSL and
 * AST fields to quiescent values.
 */
void __init init_idle(void)
{
	struct _pcb * cur = smp_gl_cpu_data[smp_processor_id()]->cpu$l_curpcb;

	/* Tell the boot processor that this CPU's idle task is up. */
	clear_bit(cur->pcb_l_cpu_id, &wait_init_idle);
	cur->psl=0;
	cur->pslindex=0;
	/* ASTs enabled for all four access modes (mask 0b1111). */
	cur->pcb_b_asten=15;
	/* ASTLVL 4: presumably "no deliverable AST" -- TODO confirm
	 * against the architecture definitions. */
	cur->phd_b_astlvl=4;
	cur->pr_astlvl=4;
	/* The idle task runs against the system process header. */
	cur->pcb_l_phd=&system_phd;

	printk("done init_idle\n");
	done_init_idle=1;
}
01794
01795 extern void init_timervecs (void);
01796
01797 extern unsigned long pcbvec[];
01798 extern unsigned long seqvec[];
01799
01800 void __init sched_init(void)
01801 {
01802
01803
01804
01805
01806 int cpuid = smp_processor_id();
01807 int nr;
01808 struct _cpu * cpu;
01809
01810 init_task.pcb_l_cpu_id = cpuid;
01811 cpu=smp_gl_cpu_data[cpuid];
01812
01813 init_task.pcb_b_type = DYN$C_PCB;
01814
01815 init_task.pcb_b_pri=31;
01816 init_task.pcb_b_prib=31;
01817 qhead_init(&init_task.pcb_l_astqfl);
01818 cpu->cpu_l_curpcb=&init_task;
01819 cpu->cpu_b_cur_pri=31;
01820
01821 sch_gl_pcbvec=pcbvec;
01822 sch_gl_seqvec=seqvec;
01823 memset(sch_gl_pcbvec,0,(unsigned long)MAXPROCESSCNT*sizeof(unsigned long));
01824 memset(sch_gl_seqvec,0,(unsigned long)MAXPROCESSCNT*sizeof(unsigned long));
01825
01826 printk("pid 0 here %x %x\n",init_task.pcb_l_astqfl,&init_task.pcb$l_astqfl);
01827
01828
01829 init_timervecs();
01830
01831 #ifndef CONFIG_VMS
01832 init_bh(TIMER_BH, timer_bh);
01833 init_bh(TQUEUE_BH, tqueue_bh);
01834 init_bh(IMMEDIATE_BH, immediate_bh);
01835 #endif
01836
01837
01838
01839
01840 atomic_inc(&init_mm.mm_count);
01841 enter_lazy_tlb(&init_mm, current, cpuid);
01842 }
01843
01844