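/*
 * Scheduler CPU capability and affinity support.
 *
 * These routines maintain the per-CPU capability masks
 * (smp_gl_cpu_data[n]->cpu$l_capability, mirrored in sch_al_cpu_cap) and the
 * capability and affinity fields of the process PCB, and supply the kernel
 * side of $CPU_CAPABILITIES- and $PROCESS_CAPABILITIES-style services
 * (exe_cpu_capabilities and exe_process_capabilities below).
 */
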
#include <far_pointers.h>
#include <linux/sched.h>
#include <ssdef.h>
#include <cpbdef.h>
#include <ipldef.h>
#include <statedef.h>
#include <internals.h>
#include <smp_routines.h>
#include <asm/hw_irq.h>
#include <prvdef.h>
#include <exe_routines.h>
#include <starlet.h>
#include <capdef.h>

/*
 * Given a required capability mask, return the set of CPUs (as a bit mask)
 * whose capability mask satisfies every requested bit.  All 32 entries of
 * smp_gl_cpu_data are assumed to point at valid per-CPU data.
 */
int sch_calculate_affinity(int mask) {
    int i;
    int new_mask = 0;
    for (i = 0; i < 32; i++) {
        if ((mask & smp_gl_cpu_data[i]->cpu$l_capability) == mask)
            new_mask |= 1 << i;
    }
    return new_mask;
}

/*
 * Remove the given CPU bits from the process's explicit affinity mask and
 * recompute pcb_l_current_affinity from the updated mask.  The previous
 * affinity mask is returned through prev_mask_p when supplied.
 */
int sch_clear_affinity(unsigned int cpu_mask, struct _pcb *pcb, unsigned int flags, UINT64_PQ prev_mask_p) {
    if (prev_mask_p)
        *prev_mask_p = pcb->pcb_l_affinity;
    pcb->pcb_l_affinity &= ~cpu_mask;
#if 0
    if (flags & CPB_M_FLAG_PERMANENT)
        pcb->pcb_l_permanent_cpu_affinity = pcb->pcb_l_affinity;
#endif
    pcb->pcb_l_current_affinity = sch_calculate_affinity(pcb->pcb_l_affinity);
    return SS__NORMAL;
}

/*
 * Add capability bits to a CPU's capability mask, mirror the new value in
 * sch_al_cpu_cap, and bump the global capability sequence number to record
 * that the capability configuration changed.
 */
int sch_add_cpu_cap(unsigned int cpu_id, unsigned int mask, UINT64_PQ prev_mask_p) {
    if (prev_mask_p)
        *prev_mask_p = smp_gl_cpu_data[cpu_id]->cpu$l_capability;
    smp_gl_cpu_data[cpu_id]->cpu$l_capability |= mask;
    sch_al_cpu_cap[cpu_id] = smp_gl_cpu_data[cpu_id]->cpu$l_capability;

    sch_gl_capability_sequence++;
    return SS__NORMAL;
}

/*
 * Clear a capability bit in the process's capability mask.  The bit cleared
 * is selected by cpu_id (1 << cpu_id); the mask argument is currently not
 * used by this routine.
 */
int sch_clear_capability(struct _pcb *pcb, unsigned int mask, unsigned int cpu_id, unsigned int flags, UINT64_PQ prev_mask_p) {
    int mask2 = 1 << cpu_id;
    if (prev_mask_p)
        *prev_mask_p = pcb->pcb_l_capability;
    pcb->pcb_l_capability &= ~mask2;
    return SS__NORMAL;
}

/*
 * Add the given CPU bits to the process's explicit affinity mask and
 * recompute pcb_l_current_affinity from the updated mask.
 */
int sch_set_affinity(unsigned int cpu_mask, struct _pcb *pcb, unsigned int flags, UINT64_PQ prev_mask_p) {
    if (prev_mask_p)
        *prev_mask_p = pcb->pcb_l_affinity;

    pcb->pcb_l_affinity |= cpu_mask;
#if 0
    if (flags & CPB_M_FLAG_PERMANENT)
        pcb->pcb_l_permanent_cpu_affinity = pcb->pcb_l_affinity;
#endif
    pcb->pcb_l_current_affinity = sch_calculate_affinity(pcb->pcb_l_affinity);
    return SS__NORMAL;
}

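/*
 * Reschedule notification used by the routines below: when a capability or
 * affinity change affects a process that is currently executing, the change
 * must be followed by a reschedule on that process's CPU.  If that CPU is
 * not the caller's, a CPU_M_RESCHED work request is sent with
 * smp_send_work(); otherwise SOFTINT_RESCHED_VECTOR (taken here to be a
 * macro that requests the local rescheduling software interrupt) is invoked
 * directly.
 */
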
/*
 * Remove capability bits from a CPU's capability mask, mirror the new value
 * in sch_al_cpu_cap, bump the capability sequence number, and force a
 * reschedule if the process currently running on that CPU no longer has all
 * of its required capabilities available there.
 */
int sch_remove_cpu_cap(unsigned int cpu_id, unsigned int mask, UINT64_PQ prev_mask_p) {
    struct _pcb * pcb;

    if (prev_mask_p)
        *prev_mask_p = smp_gl_cpu_data[cpu_id]->cpu$l_capability;

    smp_gl_cpu_data[cpu_id]->cpu$l_capability &= ~mask;

    sch_al_cpu_cap[cpu_id] = smp_gl_cpu_data[cpu_id]->cpu$l_capability;

    sch_gl_capability_sequence++;

    pcb = smp_gl_cpu_data[cpu_id]->cpu$l_curpcb;

    if ((pcb->pcb_l_capability & smp_gl_cpu_data[cpu_id]->cpu$l_capability) != pcb->pcb_l_capability) {
        if (ctl_gl_pcb->pcb$l_cpu_id != cpu_id)
            smp_send_work(CPU_M_RESCHED, cpu_id);
        else
            SOFTINT_RESCHED_VECTOR;
    }
    return SS__NORMAL;
}

/*
 * Set a capability bit in the process's capability mask.  As in
 * sch_clear_capability, the bit is selected by cpu_id; the mask argument is
 * currently not used by this routine.
 */
int sch_set_capability(struct _pcb *pcb, unsigned int mask, unsigned int cpu_id, unsigned int flags, UINT64_PQ prev_mask_p) {
    int mask2 = 1 << cpu_id;
    if (prev_mask_p)
        *prev_mask_p = pcb->pcb_l_capability;
    pcb->pcb_l_capability |= mask2;
    return SS__NORMAL;
}

/*
 * Give the process implicit affinity to the specified CPU.  If the process
 * is currently executing on a different CPU, request a reschedule there.
 */
int sch_acquire_affinity(struct _pcb *pcb, int obsolete, int cpu_id) {
    pcb->pcb_b_affinity_skip = sch$gl_affinity_skip;

    pcb->pcb_l_capability |= CPB$M_IMPLICIT_AFFINITY;

    pcb->pcb_l_affinity = cpu_id;

    if (pcb->pcb_w_state == SCH$C_CUR) {
        if (pcb->pcb_l_affinity != pcb->pcb$l_cpu_id) {
            if (ctl_gl_pcb->pcb$l_cpu_id != pcb->pcb$l_cpu_id)
                smp_send_work(CPU_M_RESCHED, pcb->pcb$l_cpu_id);
            else
                SOFTINT_RESCHED_VECTOR;
        }
    }
    return SS__NORMAL;
}

/*
 * Drop the implicit-affinity capability bit set by sch_acquire_affinity.
 */
int sch_release_affinity(struct _pcb *pcb) {
    pcb->pcb_l_capability &= ~CPB$M_IMPLICIT_AFFINITY;
    return SS__NORMAL;
}

int scs_std_change_affinity( struct _ucb *ucb_p );

/*
 * Remove capability bits from the process's capability mask and recompute
 * the set of CPUs it can run on.
 */
int sch_release_capability(struct _pcb *pcb, unsigned int mask, unsigned int cpu_id, unsigned int flags, UINT64_PQ prev_mask_p) {
    if (prev_mask_p)
        *prev_mask_p = pcb->pcb_l_capability;
    pcb->pcb_l_capability &= ~mask;
    pcb->pcb_l_current_affinity = sch_calculate_affinity(pcb->pcb_l_capability);
    return SS__NORMAL;
}

/*
 * Add capability bits to the process's capability mask, optionally making
 * the resulting affinity permanent, and force a reschedule if the process's
 * current CPU can no longer run it.  The update is done under the SCHED
 * spinlock.
 */
int sch_require_capability(struct _pcb *pcb, unsigned int mask, unsigned int cpu_id, unsigned int flags, UINT64_PQ prev_mask_p) {
    int oldipl = vmslock(&SPIN_SCHED, IPL__SCHED);

    if (prev_mask_p)
        *prev_mask_p = pcb->pcb_l_capability;

    pcb->pcb_l_capability |= mask;
#if 0
    if (*prev_mask_p != pcb->pcb_l_capability) {
        smp_gl_cpu_data[pcb->pcb_l_affinity]->cpu$l_hardaff++;
        smp_gl_cpu_data[cpu_id]->cpu$l_hardaff++;
    }
#endif
    pcb->pcb_l_affinity = cpu_id;

    if (flags & CPB_M_FLAG_PERMANENT)
        pcb->pcb_l_permanent_cpu_affinity = pcb->pcb_l_affinity;

    pcb->pcb_l_current_affinity = sch_calculate_affinity(pcb->pcb_l_capability);

    /* If this is the running process and its current CPU is no longer in its
       runnable set, ask that CPU to reschedule. */
    if (ctl_gl_pcb == pcb) {
        if ((pcb->pcb_l_current_affinity & (1 << pcb->pcb$l_cpu_id)) == 0) {
            if (pcb->pcb_l_affinity != pcb->pcb$l_cpu_id) {
                if (ctl_gl_pcb->pcb$l_cpu_id != pcb->pcb$l_cpu_id)
                    smp_send_work(CPU_M_RESCHED, pcb->pcb$l_cpu_id);
                else
                    SOFTINT_RESCHED_VECTOR;
            }
        }
    }

    vmsunlock(&SPIN_SCHED, oldipl);
    return SS__NORMAL;
}

/*
 * Return "bits" with the bits selected by select_mask replaced by the
 * corresponding bits of modify_mask; bits outside select_mask keep their
 * old values.
 */
static int change_bits (int bits, int select_mask, int modify_mask) {
    int other = bits & ~select_mask;
    return other | (modify_mask & select_mask);
}

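/*
 * exe_cpu_capabilities: kernel side of a $CPU_CAPABILITIES-style service.
 * select_mask chooses which capability bits of the target CPU are examined,
 * modify_mask supplies their new values, prev_mask (if given) receives the
 * previous mask, and CAP_M_FLAG_DEFAULT_ONLY in flags redirects the change
 * to the default CPU capability mask instead of a specific CPU.
 *
 * Illustrative example: select_mask = 0xC with modify_mask = 0x4 adds
 * capability bit 0x4 to the CPU and removes bit 0x8, leaving all other bits
 * untouched.
 */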
asmlinkage int exe_cpu_capabilities (int cpu_id, struct _generic_64 *select_mask, struct _generic_64 *modify_mask, struct _generic_64 *prev_mask, struct _generic_64 *flags) {
    int flag = 0;
    int priv = ctl_gl_pcb->pcb$l_priv;

    if (flags)
        flag = *(int*)flags;

    /* Changing CPU capabilities requires both WORLD and ALTPRI privilege. */
    if ((select_mask || modify_mask) && (priv & PRV_M_WORLD) == 0)
        return SS__NOPRIV;
    if ((select_mask || modify_mask) && (priv & PRV_M_ALTPRI) == 0)
        return SS__NOPRIV;

    /* Hold the SCHED spinlock across the capability update; the vmsunlock()
       at the bottom of the routine releases it. */
    vmslock(&SPIN_SCHED, IPL__SCHED);

    if (select_mask || modify_mask) {
        if (flag & CAP_M_FLAG_DEFAULT_ONLY) {
            /* Only the default CPU capability mask is changed. */
            if (prev_mask)
                *(int*)prev_mask = sch_gl_default_cpu_cap;
            sch_gl_default_cpu_cap = change_bits (sch_gl_default_cpu_cap, *(int*)select_mask, *(int*)modify_mask);
        } else {
            int prev = smp_gl_cpu_data[cpu_id]->cpu$l_capability;
            int mask;
            /* Selected bits that are set in the modify mask are added... */
            mask = *(int*)select_mask & (*(int*)modify_mask);
            sch_add_cpu_cap(cpu_id, mask, 0 );
            /* ...and selected bits that are clear in the modify mask are removed. */
            mask = *(int*)select_mask & ~(*(int*)modify_mask);
            sch_remove_cpu_cap(cpu_id, mask, 0 );
            if (prev_mask)
                *(int*)prev_mask = prev;
        }
    } else {
        /* No change requested; just report the current capability mask. */
        if (prev_mask)
            *(int*)prev_mask = smp_gl_cpu_data[cpu_id]->cpu$l_capability;
    }
    vmsunlock(&SPIN_SCHED, IPL__ASTDEL);
    return SS__NORMAL;
}

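/*
 * exe_process_capabilities: kernel side of a $PROCESS_CAPABILITIES-style
 * service.  The target process is located from pidadr/prcnam via
 * exe_nampid; select_mask and modify_mask are then applied to its
 * capability mask (or, with CAP_M_FLAG_DEFAULT_ONLY, to the default process
 * capability mask), and prev_mask receives the previous value.
 */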
asmlinkage int exe_process_capabilities (unsigned int *pidadr, void *prcnam, struct _generic_64 *select_mask, struct _generic_64 *modify_mask, struct _generic_64 *prev_mask, struct _generic_64 *flags) {
    int flag = 0;
    struct _pcb * retpcb;
    struct _pcb * pcb;
    unsigned long ipid, epid;
    int sts;
    int priv;

    if (flags)
        flag = *(int*)flags;

    /* Locate the target process.  exe_nampid is assumed to return with the
       SCHED spinlock held on success; the lock is released before every
       return below. */
    sts = exe_nampid(current, pidadr, prcnam, &retpcb, &ipid, &epid);
    if ((sts & 1) == 0)
        return sts;
    pcb = retpcb;
    priv = ctl_gl_pcb->pcb$l_priv;
#if 0
    if (pcb != ctl_gl_pcb && (select_mask || modify_mask) && (priv & PRV$M_WORLD) == 0)
        return SS__NOPRIV;
#endif
    /* Changing another process's capabilities requires ALTPRI privilege. */
    if (pcb != ctl_gl_pcb && (select_mask || modify_mask) && (priv & PRV$M_ALTPRI) == 0) {
        vmsunlock(&SPIN_SCHED, IPL__ASTDEL);
        return SS__NOPRIV;
    }
    if (select_mask || modify_mask) {
        if (flag & CAP_M_FLAG_DEFAULT_ONLY) {
            /* Only the default process capability mask is changed. */
            if (prev_mask)
                *(int*)prev_mask = sch_gl_default_process_cap;
            sch_gl_default_process_cap = change_bits (sch_gl_default_process_cap, *(int*)select_mask, *(int*)modify_mask);
        } else {
            int prev = pcb->pcb_l_capability;
            int mask;
            /* Selected bits that are clear in the modify mask are removed... */
            mask = *(int*)select_mask & ~(*(int*)modify_mask);
#if 0
            sch_release_capability(pcb, mask, 0, 0 );
#else
            sch_clear_capability (pcb, mask, pcb->pcb$l_cpu_id, flag, 0);
#endif
            /* ...and selected bits that are set in the modify mask are added. */
            mask = *(int*)select_mask & (*(int*)modify_mask);
#if 0
            sch_require_capability(pcb, mask, 0, 0 );
#else
            sch_set_capability (pcb, mask, pcb->pcb$l_cpu_id, flag, 0);
#endif
            if (prev_mask)
                *(int*)prev_mask = prev;
        }
    } else {
        if (prev_mask)
            *(int*)prev_mask = pcb->pcb_l_capability;
    }
    vmsunlock(&SPIN_SCHED, IPL__ASTDEL);
    return SS__NORMAL;
}

asmlinkage int exe_process_capabilities_wrap (struct struct_args * s) {
    return exe_process_capabilities (s->s1, s->s2, s->s3, s->s4, s->s5, s->s6);
}