00001
00002
00003
00004
00005
00006
00007 #include <linux/config.h>
00008 #include <linux/slab.h>
00009 #include <linux/shm.h>
00010 #include <linux/mman.h>
00011 #include <linux/pagemap.h>
00012 #include <linux/swap.h>
00013 #include <linux/swapctl.h>
00014 #include <linux/smp_lock.h>
00015 #include <linux/init.h>
00016 #include <linux/file.h>
00017 #include <linux/fs.h>
00018 #include <linux/personality.h>
00019
00020 #include <asm/uaccess.h>
00021 #include <asm/pgalloc.h>
00022
00023 #include <linux/unistd.h>
00024 #include <linux/linkage.h>
00025 #include <linux/sched.h>
00026 #include <system_data_cells.h>
00027 #include <linux/mm.h>
00028 #include <acbdef.h>
00029 #include <fcbdef.h>
00030 #include <ipl.h>
00031 #include <ipldef.h>
00032 #include <mmgdef.h>
00033 #include <pfndef.h>
00034 #include <phddef.h>
00035 #include <rdedef.h>
00036 #include <secdef.h>
00037 #include <ssdef.h>
00038 #include <vmspte.h>
00039 #include <wcbdef.h>
00040 #include <wsldef.h>
00041 #include <va_rangedef.h>
00042 #include <pfldef.h>
00043 #include <mmg_functions.h>
00044
00045 #include <linux/vmalloc.h>
00046
00047 #ifdef __x86_64__
00048 #include <asm/hardirq.h>
00049 #endif
00050
00051 #include <misc_routines.h>
00052 #include <mmg_routines.h>
00053 #include <sch_routines.h>
00054 #include <internals.h>
00055
00056 int do_wp_page(struct mm_struct *mm, struct _rde * vma, unsigned long address, pte_t *page_table, pte_t pte);
00057
00058 #ifdef __x86_64__
00059 #include <asm/kdebug.h>
00060 #else
00061 extern void die(const char *,struct pt_regs *,long);
00062 asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
00063 #endif
00064
00065 #undef OLDINT
00066 #define OLDINT
00067
00068 #ifdef __x86_64__
00069 #undef OLDINT
00070 #endif
00071
00072 unsigned long findpte_new(struct mm_struct *mm, unsigned long address);
00073
00074 #ifdef __arch_um__
00075 #include "user_util.h"
00076
00077 #define yield() do { sch_resched(); myrei(); } while(0)
00078 #endif
00079
00080
00081 #ifdef CONFIG_MM_VMS
00082
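/*
 * Allocate and initialize a fresh physical page for the faulting virtual
 * address.  mmg_allocpfn() is called with IPL raised to 8 (presumably the
 * memory-management synchronization level in this port); a negative value
 * is passed straight back as an error status.  The low bits of 'va' carry
 * the page type: global pages are redirected to the system header and to
 * the global page table entry selected by the PTE's gptx index, and for
 * process-space addresses the page-table reference count is bumped.  The
 * PFN database entry is then filled in (type, back pointer to the PTE,
 * refcnt = 1) and a working-set list entry is created via mmg_makewsle().
 */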
00083 signed int mmg_ininewpfn(struct _pcb * p, struct _phd * phd, void * va, struct _mypte * pte) {
00084 int ipl=getipl();
00085 setipl(8);
00086 signed long pfn=mmg_allocpfn();
00087 setipl(ipl);
00088 struct _pfn * page;
00089 if (pfn&0x80000000) return pfn;
00090 if ((((int)va)&WSL$M_PAGTYP)>=WSL$C_GLOBAL) {
00091 phd=mmg_gl_sysphd;
00092 pte=&((struct _mypte *)mmg_gq_gpt_base)[pte->pte$v_gptx];
00093
00094 }
00095 if ((((unsigned long)va)&0x80000000) == 0) {
00096 mmg_incptref(p->pcb$l_phd,pte);
00097 }
00098
00099
00100 mem_map[pfn].pfn_v_pagtyp=((unsigned long)va)&PFN$M_PAGTYP;
00101
00102
00103 mem_map[pfn].pfn_l_pt_pfn=0;
00104 #if 0
00105 #ifdef __i386__
00106
00107 mem_map[pfn].pfn_l_pt_pfn=va;
00108 #endif
00109 #endif
00110 mem_map[pfn].pfn_q_pte_index=0;
00111 mem_map[pfn].pfn_q_pte_index=pte;
00112
00113 page=&mem_map[pfn];
00114
00115 mem_map[pfn].pfn_l_refcnt=1;
00116
00117 mmg_makewsle(p,p->pcb$l_phd,va,pte,pfn);
00118 return pfn;
00119 }
00120
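/*
 * Bump the reference count on the frame mapped by 'pte'.  Under UML the
 * PTE's pfn field appears to hold a kernel-virtual page number, so it is
 * run through __pa() first.  Global and global-writable pages also keep a
 * share count; the "first sharer" branch is left empty, presumably as a
 * placeholder.
 */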
00121 int mmg_incptref(struct _phd * phd, struct _mypte * pte) {
00122 #ifdef __arch_um__
00123 signed long pfn=__pa(pte->pte_v_pfn << PAGE_SHIFT) >> PAGE_SHIFT ;
00124 #else
00125 signed long pfn=pte->pte_v_pfn;
00126 #endif
00127 if (mem_map[pfn].pfn_v_pagtyp==PFN$C_GLOBAL || mem_map[pfn].pfn_v_pagtyp==PFN$C_GBLWRT) {
00128 mem_map[pfn].pfn_l_shrcnt++;
00129 if (mem_map[pfn].pfn_l_shrcnt==1) {
00130
00131 }
00132 }
00133 mem_map[pfn].pfn_l_refcnt++;
00134 }
00135
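/*
 * Counterpart of mmg_incptref(): drop the reference count, and for global
 * pages the share count as well.  The "last sharer" branch is empty,
 * presumably pending reclamation of the page-table page.
 */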
00136 int mmg_decptref(struct _phd * phd, struct _mypte * pte) {
00137 signed long pfn=pte->pte_v_pfn;
00138 if (mem_map[pfn].pfn_v_pagtyp==PFN$C_GLOBAL || mem_map[pfn].pfn_v_pagtyp==PFN$C_GBLWRT) {
00139 mem_map[pfn].pfn_l_shrcnt--;
00140 if (mem_map[pfn].pfn_l_shrcnt==0) {
00141
00142 }
00143 }
00144 mem_map[pfn].pfn_l_refcnt--;
00145 }
00146
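/*
 * Insert a working-set list entry for (va, pte, pfn).  The slot comes from
 * the PHD's next working-set index, is marked valid and tagged with the
 * page type recorded in the PFN database, and the PFN entry remembers its
 * WSL index.  The PCB's global/process page counters are updated according
 * to the type bits carried in the low bits of the WSLE virtual address.
 */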
00147 int mmg_makewsle(struct _pcb * p, struct _phd * phd, void * va, void * pte, signed int pfn) {
00148 int new=p->pcb_l_phd->phd$l_wsnext++;
00149 struct _wsl * wsl = p->pcb_l_phd->phd$l_wslist;
00150 struct _wsl * wsle = &wsl[new];
00151 struct _pfn * page;
00152 if (wsle->wsl_v_valid) panic("should be invalid\n");
00153 wsle->wsl_v_valid=1;
00154 wsle->wsl_v_pagtyp=mem_map[pfn].pfn_v_pagtyp;
00155 wsle->wsl_pq_va=((unsigned long)wsle->wsl_pq_va)|(unsigned long)va;
00156
00157
00158 page=&mem_map[pfn];
00159 page->pfn_l_wslx_qw=new;
00160
00161 if ((((unsigned long)wsle->wsl_pq_va)&WSL$M_PAGTYP)==WSL$C_GLOBAL)
00162 p->pcb_l_gpgcnt++;
00163 if ((((unsigned long)wsle->wsl_pq_va)&WSL$M_PAGTYP)==WSL$C_PROCESS)
00164 p->pcb_l_ppgcnt++;
00165
00166 }
00167
00168
00169
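/*
 * Grow a stack-like region (RDE) downwards so that it covers 'address'.
 * Note the "|| 1" in the limit check: as written the function always fails
 * with -ENOMEM, so expansion through this path is effectively disabled and
 * the fault handlers below grow the region by hand instead.
 */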
00170 static inline int expand_stack2(struct _rde * vma, unsigned long address)
00171 {
00172 unsigned long grow;
00173
00174
00175
00176
00177
00178
00179 address &= PAGE_MASK;
00180
00181 grow = ((int)(vma->rde_pq_start_va - address)) >> PAGE_SHIFT;
00182 if (vma->rde_pq_start_va + vma->rde$q_region_size - address > current->rlim[RLIMIT_STACK].rlim_cur || 1 ) {
00183
00184
00185 return -ENOMEM;
00186 }
00187 vma->rde_pq_start_va = address;
00188
00189 current->active_mm->total_vm += grow;
00190
00191
00192
00193 return 0;
00194 }
00195
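/*
 * Argument block for the page-read AST: the file window to read through,
 * the target PFN and faulting address, the PTE to complete, the protection
 * bits to merge into it once the read finishes, and the region descriptor
 * of the fault.
 */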
00196 struct pfast {
00197 struct _wcb * window;
00198 unsigned long offset;
00199 unsigned long pfn;
00200 unsigned long address;
00201 pte_t * pte;
00202 unsigned long pteentry;
00203 struct _rde * rde;
00204 };
00205
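/*
 * AST routine that finishes a hard fault: read the block from the section
 * file into the target frame (via the ODS-2 path or the generic one,
 * depending on the FCB), then restore the saved protection bits in the low
 * part of the PTE and flush the TLB for that page.  If the window belongs
 * to the page file, the page-file block that was just read back is
 * released.  The pfast block allocated by makereadast() is freed here.
 */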
00206 void pagefaultast(struct pfast * p) {
00207 int res;
00208 if (p->window->wcb_l_fcb->fcb$l_fill_5)
00209 block_read_full_page3(p->window->wcb_l_fcb, &mem_map[p->pfn], p->offset);
00210 else
00211 ods2_block_read_full_page3(p->window, &mem_map[p->pfn], p->offset);
00212 *(unsigned long *)(p->pte)&=0xfffff000;
00213 *(unsigned long *)(p->pte)|=p->pteentry;
00214
00215 #if 0
00216
00217 if ((p->rde->rde_l_flags)&VM_WRITE)
00218 *(unsigned long *)(p->pte)|=_PAGE_RW|_PAGE_DIRTY;
00219 #endif
00220
00221 #ifdef __arch_um__
00222 *(unsigned long *)(p->pte)|=_PAGE_NEWPAGE;
00223 #endif
00224 flush_tlb_range(current->mm, p->address, p->address + PAGE_SIZE);
00225
00226 extern int myswapfile;
00227 struct _pfl * pfl = myswapfile;
00228 if (pfl && p->window==pfl->pfl_l_window)
00229 mmg_dallocpagfil1(p->offset>>PAGE_SHIFT);
00230
00231 kfree(p);
00232 }
00233
00234 extern int astdeb;
00235
00236 extern int in_atomic;
00237
00238
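/*
 * Queue a pagefaultast() AST to the current process so the faulted page is
 * brought in from disk outside the fault handler.  The protection to apply
 * afterwards comes from the region descriptor; for write faults the
 * RW/DIRTY bits are added and the PFN is marked modified so it will later
 * reach the modified page list.  Note the kmalloc() results are not
 * checked.
 */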
00239 int makereadast(unsigned long window, unsigned long pfn, unsigned long address, unsigned long pte, unsigned long offset, unsigned long write_flag) {
00240
00241 struct _acb * a=kmalloc(sizeof(struct _acb),GFP_KERNEL);
00242 struct pfast * pf=kmalloc(sizeof(struct pfast),GFP_KERNEL);
00243 struct _rde * rde;
00244 pf->window=window;
00245 pf->pfn=pfn;
00246 pf->address=address&0xfffff000;
00247 pf->offset=offset;
00248 pf->pte=pte;
00249 rde=mmg_lookup_rde_va(address, current->pcb$l_phd, LOOKUP_RDE_EXACT, IPL$_ASTDEL);
00250 pf->pteentry=rde->rde_r_regprot.regprt$l_region_prot;
00251 if (write_flag) {
00252 pf->pteentry|=_PAGE_RW|_PAGE_DIRTY;
00253 mem_map[pfn].pfn_l_page_state=PFN$M_MODIFY;
00254 }
00255 pf->rde=rde;
00256 a->acb_b_rmod=0;
00257 a->acb_l_kast=0;
00258 a->acb_l_ast=pagefaultast;
00259 a->acb_l_astprm=pf;
00260 astdeb=1;
00261 sch_qast(current->pcb$l_pid,0,a);
00262 }
00263
00264
00265 #ifdef __i386__
00266
00267 #define _PAGE_NEWPAGE 0
00268
00269 extern unsigned long idt;
00270
00271 #undef DEBUG_PF
00272 #define DEBUG_PF
00273
00274 #ifdef DEBUG_PF
00275 long dpf[32*1024];
00276 long dpfc[32] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
00277 #endif
00278
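/*
 * i386 page fault handler, VMS style: rather than calling
 * handle_mm_fault(), the fault is resolved directly from the VMS PTE.  The
 * faulting address comes from CR2, memory-management synchronization is
 * taken with vmslock(&SPIN_MMG, IPL__MMG), a working-set entry is freed to
 * make room, and the PTE type bits then select the case: section-file page
 * (TYP1 and TYP0 set), page-file page (TYP1 only), transition page still
 * in the PFN database, or demand-zero page.  The stock Linux paths
 * (vmalloc_fault, bad_area, do_sigbus, out_of_memory) are kept below for
 * everything else.
 */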
00279 asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code) {
00280 struct task_struct *tsk;
00281 struct mm_struct *mm;
00282 struct _rde * vma;
00283 unsigned long address;
00284 unsigned long page;
00285 signed long pfn;
00286 unsigned long fixup;
00287 int write;
00288 siginfo_t info;
00289 pgd_t *pgd;
00290 pud_t *pud;
00291 pmd_t *pmd;
00292 pte_t *pte;
00293 struct _mypte * mypte;
00294
00295
00296 __asm__("movl %%cr2,%0":"=r" (address));
00297
00298
00299 if (regs->eflags & X86_EFLAGS_IF)
00300 local_irq_enable();
00301
00302
00303
00304 if ((address&0xfffff000)==0x5a5a5000)
00305 die("5a\n",regs,error_code);
00306 if ((address&0xfffff000)==0x5a5a5000)
00307 panic("poisoned\n");
00308 if (address==0x5a5a5a5a)
00309 die("5a\n",regs,error_code);
00310 if (address==0x5a5a5a5a)
00311 panic("poisoned\n");
00312
00313 if (in_atomic) {
00314 printk("atomic addr %x\n",address);
00315 address=0x11111111;
00316 }
00317
00318 #if 0
00319 if (intr_blocked(IPL__MMG))
00320 return;
00321
00322 regtrap(REG_INTR,IPL__MMG);
00323
00324 setipl(IPL__MMG);
00325 #endif
00326
00327 vmslock(&SPIN_MMG, IPL__MMG);
00328
00329
00330
00331 tsk = current;
00332
00333 #ifdef DEBUG_PF
00334 {
00335 int pid2=tsk->pcb_l_pid&31;
00336 dpf[1024*pid2+dpfc[pid2]]=tsk;
00337 dpfc[pid2]++;
00338 dpf[1024*pid2+dpfc[pid2]]=address;
00339 dpfc[pid2]++;
00340 #if 0
00341 long addr = &pid;
00342 addr-=4;
00343 dpf[1024*pid2+dpfc[pid2]]=*(long*)addr;
00344 dpfc[pid2]++;
00345 dpf[1024*pid2+dpfc[pid2]]=a->acb_l_kast;
00346 dpfc[pid2]++;
00347 #endif
00348 if (dpfc[pid2]>1000)
00349 dpfc[pid2]=0;
00350 }
00351 #endif
00352
00353 if (in_atomic) {
00354 printk("atomic addr %x\n",address);
00355 address=0x11111111;
00356 }
00357
00358 tsk->pcb_l_phd->phd$l_pageflts++;
00359
00360
00361
00362
00363
00364
00365
00366
00367
00368
00369
00370
00371
00372
00373
00374
00375
00376 if (address >= TASK_SIZE && !(error_code & 5))
00377 goto vmalloc_fault;
00378
00379 mm = tsk->mm;
00380 info.si_code = SEGV_MAPERR;
00381
00382
00383
00384
00385
00386
00387 if (mm==&init_mm)
00388 goto no_context;
00389 if (!mm)
00390 goto no_context;
00391
00392 if (address<PAGE_SIZE)
00393 goto bad_area;
00394
00395 if (address>0x90000000)
00396 goto bad_area;
00397
00398 page = address & PAGE_MASK;
00399 pgd = pgd_offset(mm, page);
00400 pud = pud_offset(pgd, page);
00401 pmd = pmd_offset(pud, page);
00402
00403 if (0) {
00404
00405 printk("transform it\n");
00406 }
00407 pte = pte_offset(pmd, page);
00408 mypte=pte;
00409
00410 mmg_frewsle(current,address);
00411
00412
00413
00414
00415
00416
00417
00418
00419
00420
00421
00422
00423
00424
00425
00426
00427 if ((*(unsigned long *)pte)&_PAGE_TYP1) {
00428 if ((*(unsigned long *)pte)&_PAGE_TYP0) {
00429 unsigned long index=(*(unsigned long *)pte)>>PAGE_SHIFT;
00430 struct _secdef *pstl=current->pcb_l_phd->phd$l_pst_base_offset;
00431 #if 0
00432 struct _secdef *sec=&pstl[index];
00433 struct _wcb * window=sec->sec_l_window;
00434 unsigned long vbn=sec->sec_l_vbn;
00435 struct _rde * rde= mmg_lookup_rde_va(address, current->pcb$l_phd, LOOKUP_RDE_EXACT, IPL$_ASTDEL);
00436 unsigned long offset;
00437 #else
00438 struct _secdef *sec;
00439 struct _wcb * window;
00440 unsigned long vbn;
00441 struct _rde * rde;
00442 unsigned long offset;
00443
00444 if (index>64) {
00445 printk("wrong %x %x %x %x\n",address,page,pte,*pte);
00446 die("Wrong\n",regs,error_code);
00447 panic("Wrong\n");
00448 }
00449 sec=&pstl[index];
00450 window=sec->sec_l_window;
00451 vbn=sec->sec_l_vbn;
00452 rde= mmg_lookup_rde_va(address, current->pcb$l_phd, LOOKUP_RDE_EXACT, IPL$_ASTDEL);
00453 #endif
00454 if (rde==0) printk("vma0 address %x\n",address);
00455
00456 offset=((address-(unsigned long)rde->rde_pq_start_va)>>PAGE_SHIFT)+(vbn>>3);
00457
00458
00459
00460
00461
00462 {
00463 pfn = mmg_ininewpfn(tsk,tsk->pcb$l_phd,page,pte);
00464 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
00465 mem_map[pfn].pfn_q_bak=*(unsigned long *)pte;
00466
00467 *(unsigned long *)pte=((unsigned long)(pfn<<PAGE_SHIFT))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY;
00468
00469 }
00470
00471 flush_tlb_range(tsk->mm, page, page + PAGE_SIZE);
00472 vmsunlock(&SPIN_MMG, -1);
00473 makereadast(window,pfn,address,pte,offset,error_code&2);
00474
00475 return;
00476 } else {
00477 extern int myswapfile;
00478 struct _pfl * pfl=myswapfile;
00479 struct _wcb * window=pfl->pfl_l_window;
00480 unsigned long vbn=mypte->pte_v_pgflpag;
00481 struct _rde * rde;
00482 unsigned long offset;
00483
00484 offset=vbn<<PAGE_SHIFT;
00485 {
00486 pfn = mmg_ininewpfn(tsk,tsk->pcb$l_phd,page,pte);
00487 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
00488 mem_map[pfn].pfn_q_bak=*(unsigned long *)pte;
00489
00490 *(unsigned long *)pte=((unsigned long)(pfn<<PAGE_SHIFT))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY;
00491 }
00492 flush_tlb_range(tsk->mm, page, page + PAGE_SIZE);
00493 printk("soon reading pfl_page %x %x %x %x\n",vbn,pte,*(long*)pte,page);
00494 vmsunlock(&SPIN_MMG, -1);
00495 makereadast(window,pfn,address,pte,offset,error_code&2);
00496 return;
00497 }
00498 }
00499
00500 if (!((*(unsigned long *)pte)&_PAGE_TYP1)) {
00501 if (!((*(unsigned long *)pte)&_PAGE_TYP0)) {
00502 if ((*(unsigned long *)pte)&0xffffffff) {
00503 long pfn=mypte->pte_v_pfn;
00504 if (pfn>=num_physpages) goto notyet;
00505 if (pte!=mem_map[pfn].pfn_q_pte_index) goto notyet;
00506 if ((*(unsigned long *)pte)&_PAGE_PRESENT) goto notyet;
00507 int loc=mem_map[pfn].pfn_v_loc;
00508 if (loc<=PFN_C_BADPAGLST) {
00509 #ifdef OLDINT
00510 mmg_rempfn(loc,&mem_map[pfn]);
00511 #else
00512 mmg_rempfn(loc,pfn);
00513 #endif
00514 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
00515 mmg_makewsle(tsk,tsk->pcb$l_phd,address,pte,pfn);
00516 *(unsigned long *)pte|=_PAGE_PRESENT;
00517 flush_tlb_range(current->mm, page, page + PAGE_SIZE);
00518 }
00519 if (loc==PFN_C_WRTINPROG ) {
00520
00521 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
00522 mmg_makewsle(tsk,tsk->pcb$l_phd,address,pte,pfn);
00523 *(unsigned long *)pte|=_PAGE_PRESENT;
00524 flush_tlb_range(current->mm, page, page + PAGE_SIZE);
00525 }
00526
00527 vmsunlock(&SPIN_MMG, -1);
00528 return;
00529 notyet:
00530 {}
00531 } else {
00532 {
00533 struct _rde * rde= mmg_lookup_rde_va(address, current->pcb$l_phd, LOOKUP_RDE_HIGHER, IPL$_ASTDEL);
00534 if (address<rde->rde_ps_start_va && address>=(rde->rde_ps_start_va-4*PAGE_SIZE)) {
00535 long page_size = rde->rde_ps_start_va - page;
00536 rde->rde_ps_start_va-=page_size;
00537 rde->rde_l_region_size+=page_size;
00538 }
00539 {
00540 pfn = mmg_ininewpfn(tsk,tsk->pcb$l_phd,page,pte);
00541 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
00542 mem_map[pfn].pfn_q_bak=*(unsigned long *)pte;
00543 mem_map[pfn].pfn_l_page_state|=PFN$M_MODIFY;
00544 *(unsigned long *)pte=((unsigned long)(pfn<<PAGE_SHIFT))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY;
00545 if (page==0) {
00546 printk("wrong %x %x %x %x\n",address,page,pte,*pte);
00547 die("Wrong\n",regs,error_code);
00548 panic("Wrong\n");
00549 }
00550 flush_tlb_range(tsk->mm, page, page + PAGE_SIZE);
00551 memset(page,0,PAGE_SIZE);
00552 if ((error_code&2)==0) {
00553 *(unsigned long *)pte=((unsigned long)(pfn<<PAGE_SHIFT))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_USER|_PAGE_ACCESSED;
00554 flush_tlb_range(tsk->mm, page, page + PAGE_SIZE);
00555 }
00556 }
00557 }
00558 vmsunlock(&SPIN_MMG, -1);
00559 return;
00560 }
00561 } else {
00562 }
00563
00564
00565 }
00566
00567 #if 0
00568 vma= mmg_lookup_rde_va(address, current->pcb$l_phd, LOOKUP_RDE_EXACT, IPL$_ASTDEL);
00569
00570 survive2:
00571 if (error_code&2) {
00572 if (!pte_write(*pte)) {
00573 switch (do_wp_page(mm, vma, address, pte, *pte)) {
00574 case 1:
00575 current->min_flt++;
00576 break;
00577 case 2:
00578 current->maj_flt++;
00579 break;
00580 default:
00581
00582 break;
00583 }
00584 }
00585 }
00586
00587
00588 *pte = pte_mkyoung(*pte);
00589 if(pte_write(*pte)) *pte = pte_mkdirty(*pte);
00590
00591
00592 vmsunlock(&SPIN_MMG, -1);
00593 return(0);
00594
00595 return;
00596 #endif
00597
00598
00599 down_read(&mm->mmap_sem);
00600
00601
00602 vma = mmg_lookup_rde_va(address, current->pcb$l_phd, LOOKUP_RDE_EXACT, IPL$_ASTDEL);
00603
00604 if (!vma)
00605 goto bad_area;
00606 if (vma->rde_ps_start_va <= address)
00607 goto good_area;
00608 if (!(vma->rde_l_flags & VM_GROWSDOWN))
00609 goto bad_area;
00610 if (error_code & 4) {
00611
00612
00613
00614
00615
00616
00617 if (address + 32 < regs->esp)
00618 goto bad_area;
00619 }
00620 if (expand_stack2(vma, address))
00621 goto bad_area;
00622
00623
00624
00625
00626 good_area:
00627 info.si_code = SEGV_ACCERR;
00628 write = 0;
00629 switch (error_code & 3) {
00630 default:
00631 #ifdef TEST_VERIFY_AREA
00632 if (regs->cs == KERNEL_CS)
00633 printk("WP fault at %08lx\n", regs->eip);
00634 #endif
00635
00636 case 2:
00637 if (!(vma->rde_l_flags & VM_WRITE))
00638 goto bad_area;
00639 write++;
00640 pfn=mypte->pte_v_pfn;
00641 mem_map[pfn].pfn_l_page_state=PFN$M_MODIFY;
00642 break;
00643 case 1:
00644 goto bad_area;
00645 case 0:
00646 if (!(vma->rde_l_flags & (VM_READ | VM_EXEC)))
00647 goto bad_area;
00648 }
00649
00650 survive:
00651
00652
00653
00654
00655
00656
00657
00658 switch (do_wp_page(mm, vma, address, pte, *pte)) {
00659 case 1:
00660 tsk->min_flt++;
00661 break;
00662 case 2:
00663 tsk->maj_flt++;
00664 break;
00665 case 0:
00666 goto do_sigbus;
00667 default:
00668 goto out_of_memory;
00669 }
00670
00671
00672
00673
00674 if (regs->eflags & VM_MASK) {
00675 unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
00676 if (bit < 32)
00677 tsk->thread.screen_bitmap |= 1 << bit;
00678 }
00679 up_read(&mm->mmap_sem);
00680 vmsunlock(&SPIN_MMG, -1);
00681 return;
00682
00683
00684
00685
00686
00687 bad_area:
00688 up_read(&mm->mmap_sem);
00689
00690
00691 if (error_code & 4) {
00692 tsk->thread.cr2 = address;
00693 tsk->thread.error_code = error_code;
00694 tsk->thread.trap_no = 14;
00695 info.si_signo = SIGSEGV;
00696 info.si_errno = 0;
00697
00698 info.si_addr = (void *)address;
00699 force_sig_info(SIGSEGV, &info, tsk);
00700 vmsunlock(&SPIN_MMG, -1);
00701 return;
00702 }
00703
00704
00705
00706
00707 if (boot_cpu_data.f00f_bug) {
00708 unsigned long nr;
00709
00710 nr = (address - idt) >> 3;
00711
00712 if (nr == 6) {
00713 do_invalid_op(regs, 0);
00714 vmsunlock(&SPIN_MMG, -1);
00715 return;
00716 }
00717 }
00718
00719 no_context:
00720
00721 if ((fixup = search_exception_table(regs->eip)) != 0) {
00722 regs->eip = fixup;
00723 vmsunlock(&SPIN_MMG, -1);
00724 return;
00725 }
00726
00727
00728
00729
00730
00731
00732 bust_spinlocks(1);
00733
00734 if (address < PAGE_SIZE)
00735 printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
00736 else
00737 printk(KERN_ALERT "Unable to handle kernel paging request");
00738 printk(" at virtual address %08lx\n",address);
00739 printk(" printing eip:\n");
00740 printk("%08lx\n", regs->eip);
00741 asm("movl %%cr3,%0":"=r" (page));
00742 page = ((unsigned long *) __va(page))[address >> 22];
00743 printk(KERN_ALERT "*pde = %08lx\n", page);
00744 if (page & 1) {
00745 page &= PAGE_MASK;
00746 address &= 0x003ff000;
00747 page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
00748 printk(KERN_ALERT "*pte = %08lx\n", page);
00749 }
00750 die("Oops", regs, error_code);
00751 bust_spinlocks(0);
00752 do_exit(SIGKILL);
00753
00754
00755
00756
00757
00758 out_of_memory:
00759 up_read(&mm->mmap_sem);
00760 if (tsk->pcb_l_pid == INIT_PID) {
00761
00762 #if 0
00763 current->need_resched=1;
00764 schedule();
00765 #endif
00766 down_read(&mm->mmap_sem);
00767 goto survive;
00768 }
00769 printk("VM: killing process %s\n", tsk->pcb_t_lname);
00770 if (error_code & 4)
00771 do_exit(SIGKILL);
00772 goto no_context;
00773
00774 do_sigbus:
00775 up_read(&mm->mmap_sem);
00776
00777
00778
00779
00780
00781 tsk->thread.cr2 = address;
00782 tsk->thread.error_code = error_code;
00783 tsk->thread.trap_no = 14;
00784 info.si_signo = SIGBUS;
00785 info.si_errno = 0;
00786 info.si_code = BUS_ADRERR;
00787 info.si_addr = (void *)address;
00788 force_sig_info(SIGBUS, &info, tsk);
00789
00790
00791 if (!(error_code & 4))
00792 goto no_context;
00793 vmsunlock(&SPIN_MMG, -1);
00794 return;
00795
00796 vmalloc_fault:
00797 {
00798
00799
00800
00801
00802
00803
00804
00805 int offset = __pgd_offset(address);
00806 pgd_t *pgd, *pgd_k;
00807 pud_t *pud, *pud_k;
00808 pmd_t *pmd, *pmd_k;
00809 pte_t *pte_k;
00810
00811 asm("movl %%cr3,%0":"=r" (pgd));
00812 pgd = offset + (pgd_t *)__va(pgd);
00813 pgd_k = init_mm.pgd + offset;
00814
00815 if (!pgd_present(*pgd_k))
00816 goto no_context;
00817 set_pgd(pgd, *pgd_k);
00818
00819 pud = pud_offset(pgd, address);
00820 pud_k = pud_offset(pgd_k, address);
00821 if (!pud_present(*pud_k))
00822 goto no_context;
00823 set_pud(pud, *pud_k);
00824
00825 pmd = pmd_offset(pud, address);
00826 pmd_k = pmd_offset(pud_k, address);
00827 if (!pmd_present(*pmd_k))
00828 goto no_context;
00829 set_pmd(pmd, *pmd_k);
00830
00831 pte_k = pte_offset(pmd_k, address);
00832 if (!pte_present(*pte_k))
00833 goto no_context;
00834 vmsunlock(&SPIN_MMG, -1);
00835 return;
00836 }
00837 }
00838 #endif
00839
00840 #ifdef __arch_um__
00841
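/*
 * User-mode (UML) variant of the fault handler.  The flow mirrors the i386
 * do_page_fault() above, with two differences: PTEs here hold kernel
 * virtual addresses (hence the __va()/__pa() conversions), and there is an
 * extra case that resolves global sections by copying the global page
 * table entry into the process PTE, faulting the global page in first when
 * it is not yet valid.
 */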
00842 unsigned long segv(unsigned long address, unsigned long ip, int is_write,
00843 int is_user)
00844 {
00845 struct mm_struct *mm = current->mm;
00846 struct _rde *vma;
00847 struct siginfo si;
00848 void *catcher;
00849 pgd_t *pgd;
00850 pmd_t *pmd;
00851 pte_t *pte;
00852 unsigned long page;
00853 signed long pfn;
00854 struct _mypte * mypte;
00855 struct _pcb * tsk=current;
00856
00857
00858
00859 if (intr_blocked(IPL__MMG))
00860 return;
00861
00862 regtrap(REG_INTR,IPL__MMG);
00863
00864 setipl(IPL__MMG);
00865
00866
00867
00868 if((address >= start_vm) && (address < end_vm)){
00869 flush_tlb_kernel_vm();
00870 return(0);
00871 }
00872 if(mm == NULL) panic("Segfault with no mm");
00873
00874 if (address&0x80000000) {
00875
00876 }
00877
00878 if (address<PAGE_SIZE)
00879 goto skip;
00880
00881 if (address>0x80000000)
00882 goto skip;
00883
00884 if (in_atomic) {
00885 printk("atomic addr %x\n",address);
00886 address=0x11111111;
00887 }
00888
00889 current->pcb_l_phd->phd$l_pageflts++;
00890
00891
00892 page = address & PAGE_MASK;
00893 pgd = pgd_offset(mm, page);
00894 pmd = pmd_offset(pgd, page);
00895 if (0) {
00896
00897 printk("transform it\n");
00898 }
00899 pte = pte_offset(pmd, page);
00900
00901 mypte = pte;
00902 if (((unsigned long)mypte)<0x80000000)
00903 goto skip;
00904
00905 mmg_frewsle(current,address);
00906
00907
00908
00909
00910
00911
00912
00913
00914
00915
00916
00917
00918
00919
00920 if (mypte->pte_v_typ1) {
00921 if (mypte->pte_v_typ0) {
00922 unsigned long index=(*(unsigned long *)pte)>>PAGE_SHIFT;
00923 struct _secdef *pstl=current->pcb_l_phd->phd$l_pst_base_offset;
00924 struct _secdef *sec=&pstl[index];
00925 struct _wcb * window=sec->sec_l_window;
00926 unsigned long vbn=sec->sec_l_vbn;
00927 struct _rde * rde= mmg_lookup_rde_va(address, current->pcb$l_phd, LOOKUP_RDE_EXACT, IPL$_ASTDEL);
00928 unsigned long offset;
00929 offset=((address-(unsigned long)rde->rde_pq_start_va)>>PAGE_SHIFT)+(vbn>>3);
00930
00931 pfn = mmg_ininewpfn(tsk,tsk->pcb$l_phd,page,pte);
00932 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
00933 mem_map[pfn].pfn_q_bak=*(unsigned long *)pte;
00934
00935 *(unsigned long *)pte=((unsigned long)__va(pfn*PAGE_SIZE))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY;
00936 flush_tlb_range(current->mm, page, page + PAGE_SIZE);
00937
00938 makereadast(window,pfn,address,pte,offset,is_write);
00939 return;
00940 } else {
00941 extern int myswapfile;
00942 struct _pfl * pfl=myswapfile;
00943 struct _wcb * window=pfl->pfl_l_window;
00944 unsigned long vbn=mypte->pte_v_pgflpag;
00945 struct _rde * rde;
00946 unsigned long offset;
00947
00948 offset=vbn<<PAGE_SHIFT;
00949 {
00950 pfn = mmg_ininewpfn(tsk,tsk->pcb$l_phd,page,pte);
00951 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
00952 mem_map[pfn].pfn_q_bak=*(unsigned long *)pte;
00953
00954 *(unsigned long *)pte=((unsigned long)(pfn<<PAGE_SHIFT))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY;
00955 }
00956 flush_tlb_range(tsk->mm, page, page + PAGE_SIZE);
00957 printk("soon reading pfl_page %x %x %x %x\n",vbn,pte,*(long*)pte,page);
00958 makereadast(window,pfn,address,pte,offset,is_write);
00959 return;
00960 }
00961 }
00962
00963 if (mypte->pte_v_typ1==0) {
00964 if (mypte->pte_v_typ0==0) {
00965 if (mypte->pte_v_pfn) {
00966 long pfn=mypte->pte_v_pfn;
00967 if (pfn>=num_physpages) goto notyet;
00968 if (pte!=mem_map[pfn].pfn_q_pte_index) goto notyet;
00969 if ((*(unsigned long *)pte)&_PAGE_PRESENT) goto notyet;
00970 int loc=mem_map[pfn].pfn_v_loc;
00971 if (loc<=PFN_C_BADPAGLST) {
00972 #ifdef OLDINT
00973 mmg_rempfn(loc,&mem_map[pfn]);
00974 #else
00975 mmg_rempfn(loc,pfn);
00976 #endif
00977 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
00978 mmg_makewsle(tsk,tsk->pcb$l_phd,address,pte,pfn);
00979 *(unsigned long *)pte|=_PAGE_PRESENT;
00980 flush_tlb_range(current->mm, page, page + PAGE_SIZE);
00981 }
00982 if (loc==PFN_C_WRTINPROG ) {
00983
00984 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
00985 mmg_makewsle(tsk,tsk->pcb$l_phd,address,pte,pfn);
00986 *(unsigned long *)pte|=_PAGE_PRESENT;
00987 flush_tlb_range(current->mm, page, page + PAGE_SIZE);
00988 }
00989
00990 return;
00991 notyet: {}
00992 } else {
00993 struct _rde * rde= mmg_lookup_rde_va(address, current->pcb$l_phd, LOOKUP_RDE_HIGHER, IPL$_ASTDEL);
00994 if (address<rde->rde_ps_start_va && address>=(rde->rde_ps_start_va-PAGE_SIZE)) {
00995 rde->rde_ps_start_va-=PAGE_SIZE;
00996 rde->rde_l_region_size+=PAGE_SIZE;
00997 }
00998 pfn = mmg_ininewpfn(tsk,tsk->pcb$l_phd,page,pte);
00999 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
01000 mem_map[pfn].pfn_q_bak=*(unsigned long *)pte;
01001 mem_map[pfn].pfn_l_page_state|=PFN$M_MODIFY;
01002 *(unsigned long *)pte=((unsigned long)__va(pfn*PAGE_SIZE))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY;
01003 flush_tlb_range(current->mm, page, page + PAGE_SIZE);
01004 bzero(page,PAGE_SIZE);
01005 if (is_write==0) {
01006 *(unsigned long *)pte=((unsigned long)__va(pfn*PAGE_SIZE))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_USER|_PAGE_ACCESSED;
01007 flush_tlb_range(current->mm, page, page + PAGE_SIZE);
01008 }
01009 return;
01010 }
01011 } else {
01012 struct _mypte * gpt = mmg_gq_gpt_base;
01013 unsigned long index=mypte->pte_v_gptx;
01014 struct _mypte * gpte = &gpt[index];
01015
01016 if (gpte->pte_v_valid) {
01017 if (gpte->pte_v_typ1) {
01018 *mypte=*gpte;
01019 mypte->pte_v_global=0;
01020 mypte->pte_v_gblwrt=0;
01021 mmg_incptref(mmg_gl_sysphd,pte);
01022 } else {
01023 *mypte=*gpte;
01024 mypte->pte_v_global=0;
01025 mypte->pte_v_gblwrt=0;
01026 mmg_incptref(mmg_gl_sysphd,pte);
01027 }
01028
01029 } else {
01030
01031 if (gpte->pte_v_typ1) {
01032 unsigned long gptx=mypte->pte_v_gptx;
01033 struct _mypte * gpte=&((struct _mypte *)mmg_gq_gpt_base)[gptx];
01034 struct _secdef * pstl=((struct _phd *)mmg_gl_sysphd)->phd$l_pst_base_offset;
01035 unsigned long index=gpte->pte_v_stx;
01036 struct _secdef *sec=&pstl[index];
01037 struct _wcb * window=sec->sec_l_window;
01038 unsigned long vbn=sec->sec_l_vbn;
01039 unsigned long offset=gptx-sec->sec_l_vpx;
01040 pfn = mmg_ininewpfn(tsk,tsk->pcb$l_phd,page|PFN$C_GLOBAL,pte);
01041 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
01042 *(unsigned long *)gpte=((unsigned long)__va(pfn*PAGE_SIZE))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY;
01043 gpte->pte_v_global=1;
01044 *mypte=*gpte;
01045 mypte->pte_v_global=0;
01046 mypte->pte_v_gblwrt=0;
01047 *(unsigned long *)pte|=_PAGE_DIRTY;
01048 flush_tlb_range(current->mm, page, page + PAGE_SIZE);
01049 makereadast(window,pfn,address,pte,offset,is_write);
01050 } else {
01051 pfn = mmg_ininewpfn(tsk,tsk->pcb$l_phd,page|PFN$C_GLOBAL,pte);
01052 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
01053 *(unsigned long *)pte=((unsigned long)__va(pfn*PAGE_SIZE))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY;
01054 *(unsigned long *)gpte=((unsigned long)__va(pfn*PAGE_SIZE))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY;
01055 gpte->pte_v_global=1;
01056 flush_tlb_range(current->mm, page, page + PAGE_SIZE);
01057 }
01058 }
01059
01060
01061 }
01062
01063
01064 }
01065
01066 vma= mmg_lookup_rde_va(address, current->pcb$l_phd, LOOKUP_RDE_EXACT, IPL$_ASTDEL);
01067
01068 survive2:
01069 if (is_write) {
01070 if (!pte_write(*pte)) {
01071 pfn=mypte->pte_v_pfn;
01072 mem_map[pfn].pfn_l_page_state=PFN$M_MODIFY;
01073 switch (do_wp_page(mm, vma, address, pte, *pte)) {
01074 case 1:
01075 current->min_flt++;
01076 break;
01077 case 2:
01078 current->maj_flt++;
01079 break;
01080 default:
01081 if (current->pcb_l_pid == INIT_PID) {
01082 up_read(&mm->mmap_sem);
01083 yield();
01084 down_read(&mm->mmap_sem);
01085
01086 }
01087 }
01088 }
01089 }
01090
01091
01092 *pte = pte_mkyoung(*pte);
01093 if(pte_write(*pte)) *pte = pte_mkdirty(*pte);
01094 flush_tlb_page2(mm, page);
01095
01096 return(0);
01097
01098 return;
01099
01100 if (is_write==0) {
01101 pfn = mmg_ininewpfn(tsk,tsk->pcb$l_phd,page,pte);
01102 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
01103 *(unsigned long *)pte=((unsigned long)__va(pfn*PAGE_SIZE))|_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY;
01104 flush_tlb_range(current->mm, page, page + PAGE_SIZE);
01105 bcopy(page,__va(pfn*PAGE_SIZE),PAGE_SIZE);
01106 }
01107
01108 return;
01109
01110
01111 skip:
01112
01113 catcher = current->thread.fault_catcher;
01114 si.si_code = SEGV_MAPERR;
01115 down_read(&mm->mmap_sem);
01116
01117 goto bad;
01118
01119 vma = mmg_lookup_rde_va(address, current->pcb$l_phd, LOOKUP_RDE_EXACT, IPL$_ASTDEL);
01120 if(!vma) goto bad;
01121 else if(vma->rde_pq_start_va <= address) goto good_area;
01122
01123 else if(expand_stack2(vma, address)) goto bad;
01124
01125 good_area:
01126 si.si_code = SEGV_ACCERR;
01127
01128
01129 page = address & PAGE_MASK;
01130 if(page == (unsigned long) current + PAGE_SIZE)
01131 panic("Kernel stack overflow");
01132 pgd = pgd_offset(mm, page);
01133 pmd = pmd_offset(pgd, page);
01134 do {
01135 survive:
01136 switch (do_wp_page(mm, vma, address, pte, *pte)) {
01137 case 1:
01138 current->min_flt++;
01139 break;
01140 case 2:
01141 current->maj_flt++;
01142 break;
01143 default:
01144 if (current->pcb_l_pid == INIT_PID) {
01145 up_read(&mm->mmap_sem);
01146 yield();
01147 down_read(&mm->mmap_sem);
01148 goto survive;
01149 }
01150
01151 case 0:
01152 goto bad;
01153 }
01154 pte = pte_offset(pmd, page);
01155 } while(!pte_present(*pte));
01156 *pte = pte_mkyoung(*pte);
01157 if(pte_write(*pte)) *pte = pte_mkdirty(*pte);
01158 flush_tlb_page2(mm, page);
01159 up_read(&mm->mmap_sem);
01160 return(0);
01161 bad:
01162 if(catcher != NULL){
01163 current->thread.fault_addr = (void *) address;
01164 up_read(&mm->mmap_sem);
01165 do_longjmp(catcher);
01166 }
01167 else if(current->thread.fault_addr != NULL){
01168 panic("fault_addr set but no fault catcher");
01169 }
01170 if(!is_user)
01171 panic("Kernel mode fault at addr 0x%lx, ip 0x%lx",
01172 address, ip);
01173 si.si_signo = SIGSEGV;
01174 si.si_addr = (void *) address;
01175 current->thread.cr2 = address;
01176 current->thread.err = is_write;
01177 force_sig_info(SIGSEGV, &si, current);
01178 up_read(&mm->mmap_sem);
01179 return(0);
01180 }
01181 #endif
01182 #endif
01183
01184 #ifdef CONFIG_VMS
01185
01186 #if 0
01187 long myindex=0;
01188 long mystack[1024];
01189 long mystack2[1024];
01190 long mystack3[1024];
01191 long mystack4[1024];
01192 #endif
01193
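/*
 * Free one working-set list entry so a new page can be faulted in.  The
 * list is scanned starting at the next working-set index: an empty slot
 * ends the scan as long as the working set is within its size and quota,
 * the check for page-table pages is currently a no-op, addresses at or
 * above 0x70000000 are skipped, and anything else is evicted through
 * mmg_frewslx().  The index wraps at the end of the list.
 */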
01194 int mmg_frewsle(struct _pcb * p, void * va) {
01195 struct _phd * phd = p->pcb_l_phd;
01196 struct _wsl * wsl = phd->phd_l_wslist;
01197 struct _wsl wsle ;
01198 unsigned long * pte;
01199 unsigned long index;
01200 unsigned long va2;
01201 unsigned long sts;
01202
01203 again:
01204 wsle = wsl[phd->phd_l_wsnext];
01205 if (wsle.wsl_pq_va==0) {
01206 if (phd->phd_l_wssize<p->pcb$l_ppgcnt+p->pcb$l_gpgcnt) {
01207 goto more;
01208 }
01209
01210 if (phd->phd_l_wsquota<p->pcb$l_ppgcnt+p->pcb$l_gpgcnt) {
01211 goto more;
01212 }
01213
01214 return SS__NORMAL;
01215
01216
01217
01218 }
01219 if ((((int)wsle.wsl_pq_va)&WSL$M_PAGTYP)==WSL$C_PPGTBL||(((int)wsle.wsl$pq_va)&WSL$M_PAGTYP)==WSL$C_GPGTBL) {
01220
01221 }
01222
01223
01224
01225
01226
01227 index=phd->phd_l_wsnext;
01228 va2=((unsigned long)wsle.wsl_pq_va)&0xfffff000;
01229 pte=findpte_new(p->mm,va2);
01230
01231 {
01232 #ifdef __arch_um__
01233 signed long pfn=__pa(((struct _mypte*)pte)->pte_v_pfn << PAGE_SHIFT) >> PAGE_SHIFT ;
01234 #else
01235 signed long pfn=((struct _mypte*)pte)->pte_v_pfn;
01236 #endif
01237
01238 #if 0
01239
01240 if ((mem_map[pfn].pfn_q_bak&PTE$M_TYP0)==0)
01241 goto more;
01242
01243 if (*pte&_PAGE_DIRTY)
01244 goto more;
01245 #endif
01246 if ((unsigned long)va2>=0x70000000) goto more;
01247 }
01248
01249 sts=mmg_frewslx(p, va2,pte,index);
01250 if (sts==SS__NORMAL)
01251 return SS__NORMAL;
01252
01253 more:
01254 phd->phd_l_wsnext++;
01255 if (phd->phd_l_wsnext>phd->phd$l_wslast)
01256 phd->phd_l_wsnext=0;
01257 goto again;
01258
01259 }
01260
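/*
 * Evict the page at 'va' from the working set: flush the page's
 * translation (the explicit clearing of the PRESENT/DIRTY bits is compiled
 * out; under UML the PTE is flagged _PAGE_NEWPAGE so the host mapping is
 * torn down), remove the WSL entry with mmg_delwslx() and hand the frame
 * back to the page lists via mmg_relpfn().
 */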
01261 int mmg_frewslx(struct _pcb * p, void * va,unsigned long * pte, unsigned long index) {
01262 #ifdef __arch_um__
01263 signed long pfn=__pa(((struct _mypte*)pte)->pte_v_pfn << PAGE_SHIFT) >> PAGE_SHIFT ;
01264 #else
01265 signed long pfn=((struct _mypte*)pte)->pte_v_pfn;
01266 #endif
01267 struct _wsl * wsl = p->pcb_l_phd->phd$l_wslist;
01268
01269 #if 0
01270
01271 if (*pte&_PAGE_DIRTY)
01272 mem_map[pfn].pfn_v_modify=1;
01273 else
01274 mem_map[pfn].pfn_v_modify=0;
01275 (*pte) &= ~(_PAGE_DIRTY|_PAGE_PRESENT);
01276 #endif
01277
01278
01279
01280
01281 #ifdef __arch_um__
01282
01283 *pte|=_PAGE_NEWPAGE;
01284 #endif
01285 flush_tlb_range(p->mm, va, va + PAGE_SIZE);
01286
01287
01288 mmg_delwslx(p,p->pcb$l_phd,index,pte);
01289
01290 #if 0
01291 mystack[myindex]=va;
01292 mystack2[myindex]=pte;
01293 mystack3[myindex]=*(long*)pte;
01294 mystack4[myindex]=index|((mem_map[pfn].pfn_l_page_state&PFN$M_MODIFY)<<16);
01295 myindex++;
01296 #endif
01297 mmg_relpfn(pfn);
01298
01299 return SS__NORMAL;
01300 }
01301
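/*
 * Clear working-set list slot 'index' and keep the PCB page counters in
 * step: a global page drops a page-table reference through mmg_decptref()
 * and decrements the global page count, a process page decrements the
 * process page count.
 */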
01302 int mmg_delwslx(struct _pcb * pcb, struct _phd * phd, int index,int pte) {
01303 struct _wsl * wsl = phd->phd_l_wslist;
01304
01305 if ((((unsigned long)wsl[index].wsl_pq_va)&WSL$M_PAGTYP)==WSL$C_GLOBAL) {
01306
01307 mmg_decptref(pcb->pcb$l_phd,pte);
01308 pcb->pcb_l_gpgcnt--;
01309 }
01310
01311 if ((((unsigned long)wsl[index].wsl_pq_va)&WSL$M_PAGTYP)==WSL$C_PROCESS) {
01312
01313 pcb->pcb_l_ppgcnt--;
01314 }
01315
01316 wsl[index].wsl_pq_va=0;
01317
01318 }
01319
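/*
 * Walk the page tables of 'mm' and return the PTE that maps 'address', or
 * 0 if the PMD is not populated.  The result is returned as an unsigned
 * long and cast back to a PTE pointer by the callers.
 */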
01320 unsigned long findpte_new(struct mm_struct *mm, unsigned long address) {
01321 unsigned long page;
01322 pgd_t *pgd = 0;
01323 pud_t *pud = 0;
01324 pmd_t *pmd = 0;
01325 pte_t *pte = 0;
01326 page = address & PAGE_MASK;
01327 pgd = pgd_offset(mm, page);
01328 pud = pud_offset(pgd, page);
01329 pmd = pmd_offset(pud, page);
01330 if (pmd && *(long *)pmd)
01331 pte = pte_offset(pmd, page);
01332 return pte;
01333 }
01334
01335 #ifdef __x86_64__
01336
01337
01338
01339
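/*
 * Helper modelled on the stock x86_64 fault handler: decode up to 15
 * opcode bytes at the faulting RIP to decide whether the access came from
 * a prefetch instruction (0x0F 0x0D / 0x0F 0x18); such faults are simply
 * ignored.  Code segments in the LDT are not examined.
 */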
01340 static int is_prefetch(struct pt_regs *regs, unsigned long addr)
01341 {
01342 unsigned char *instr = (unsigned char *)(regs->rip);
01343 int scan_more = 1;
01344 int prefetch = 0;
01345 unsigned char *max_instr = instr + 15;
01346
01347
01348 if (regs->rip == addr)
01349 return 0;
01350
01351 if (regs->cs & (1<<2))
01352 return 0;
01353
01354 while (scan_more && instr < max_instr) {
01355 unsigned char opcode;
01356 unsigned char instr_hi;
01357 unsigned char instr_lo;
01358
01359 if (__get_user(opcode, instr))
01360 break;
01361
01362 instr_hi = opcode & 0xf0;
01363 instr_lo = opcode & 0x0f;
01364 instr++;
01365
01366 switch (instr_hi) {
01367 case 0x20:
01368 case 0x30:
01369
01370
01371
01372
01373 scan_more = ((instr_lo & 7) == 0x6);
01374 break;
01375
01376 case 0x40:
01377
01378
01379
01380
01381
01382
01383 scan_more = ((regs->cs & 3) == 0) || (regs->cs == __USER_CS);
01384 break;
01385
01386 case 0x60:
01387
01388 scan_more = (instr_lo & 0xC) == 0x4;
01389 break;
01390 case 0xF0:
01391
01392 scan_more = !instr_lo || (instr_lo>>1) == 1;
01393 break;
01394 case 0x00:
01395
01396 scan_more = 0;
01397 if (__get_user(opcode, instr))
01398 break;
01399 prefetch = (instr_lo == 0xF) &&
01400 (opcode == 0x0D || opcode == 0x18);
01401 break;
01402 default:
01403 scan_more = 0;
01404 break;
01405 }
01406 }
01407
01408 #if 0
01409 if (prefetch)
01410 printk("%s: prefetch caused page fault at %lx/%lx\n", current->pcb_t_lname,
01411 regs->rip, addr);
01412 #endif
01413 return prefetch;
01414 }
01415
01416 #define _PAGE_NEWPAGE 0
01417
01418 int page_fault_trace;
01419 int exception_trace = 1;
01420
01421
01422
01423
01424
01425
01426
01427
01428
01429
01430
01431
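/*
 * x86_64 page fault handler.  Resolution is the same VMS-style scheme as
 * the i386 version above (CR2, SPIN_MMG, working-set trimming, then the
 * PTE type bits pick section-file, page-file, transition or demand-zero
 * handling), plus the 64-bit extras from the stock handler: prefetch
 * faults are filtered out with is_prefetch(), a compatibility-mode caller
 * faulting above 4 GB returns without further handling, and the page
 * tables are allocated with pud/pmd/pte_alloc rather than only walked.
 */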
01432 asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
01433 {
01434 struct task_struct *tsk;
01435 struct mm_struct *mm;
01436 struct _rde * vma;
01437 unsigned long address;
01438 unsigned long page;
01439 signed long pfn;
01440 unsigned long fixup;
01441 int write;
01442 siginfo_t info;
01443 pgd_t *pgd;
01444 pud_t *pud;
01445 pmd_t *pmd;
01446 pte_t *pte;
01447 struct _mypte * mypte;
01448
01449
01450 __asm__("movq %%cr2,%0":"=r" (address));
01451
01452 if (regs->eflags & X86_EFLAGS_IF)
01453 __sti();
01454
01455 #ifdef CONFIG_CHECKING
01456 if (1 ||page_fault_trace)
01457 printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
01458 regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
01459
01460
01461 {
01462 unsigned long gs;
01463 struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
01464 rdmsrl(MSR_GS_BASE, gs);
01465 if (gs != (unsigned long)pda) {
01466 wrmsrl(MSR_GS_BASE, pda);
01467 printk("page_fault: wrong gs %lx expected %p\n", gs, pda);
01468 }
01469 }
01470 #endif
01471
01472
01473
01474 if (address==0x5a5a5a5a)
01475 panic("poisoned\n");
01476
01477 if (in_atomic) {
01478 printk("atomic addr %x\n",address);
01479 address=0x11111111;
01480 }
01481
01482 #if 0
01483 if (intr_blocked(IPL__MMG))
01484 return;
01485
01486 regtrap(REG_INTR,IPL__MMG);
01487
01488 setipl(IPL__MMG);
01489
01490 #endif
01491
01492 vmslock(&SPIN_MMG, IPL__MMG);
01493
01494
01495 tsk = current;
01496 mm = tsk->mm;
01497 info.si_code = SEGV_MAPERR;
01498
01499 if (in_atomic) {
01500 printk("atomic addr %x\n",address);
01501 address=0x11111111;
01502 }
01503
01504 tsk->pcb_l_phd->phd$l_pageflts++;
01505
01506
01507 if (unlikely(!(error_code & 5) &&
01508 ((address >= VMALLOC_START && address <= VMALLOC_END) ||
01509 (address >= MODULES_VADDR && address <= MODULES_END))))
01510 goto vmalloc_fault;
01511
01512
01513
01514
01515
01516 if (mm==&init_mm)
01517 goto no_context;
01518 if (in_interrupt() || !mm)
01519 goto bad_area_nosemaphore;
01520
01521 if (address<PAGE_SIZE)
01522 goto bad_area;
01523
01524
01525
01526
01527
01528 if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
01529 (address >> 32))
01530 {
01531 vmsunlock(&SPIN_MMG, -1);
01532 return;
01533 }
01534
01535 #if 0
01536 page = address & PAGE_MASK;
01537 pgd = pgd_offset(mm, page);
01538 pmd = pmd_offset(pgd, page);
01539 #else
01540 spin_lock(&mm->page_table_lock);
01541 page = address & PAGE_MASK;
01542 pgd = pgd_offset(mm, page);
01543 pud = pud_alloc(mm, pgd, page);
01544 pmd = pmd_alloc(mm, pud, page);
01545 pte = pte_alloc(mm, pmd, page);
01546 spin_unlock(&mm->page_table_lock);
01547 #endif
01548
01549 if (0) {
01550
01551 printk("transform it\n");
01552 }
01553 #if 0
01554 pte = pte_offset(pmd, page);
01555 #endif
01556 mypte=pte;
01557
01558 mmg_frewsle(current,address);
01559
01560
01561
01562
01563
01564
01565
01566
01567
01568
01569
01570
01571
01572
01573
01574
01575 if ((*(unsigned long *)pte)&_PAGE_TYP1) {
01576 if ((*(unsigned long *)pte)&_PAGE_TYP0) {
01577 unsigned long index=(*(unsigned long *)pte)>>PAGE_SHIFT;
01578 struct _secdef *pstl=current->pcb_l_phd->phd$l_pst_base_offset;
01579 #if 0
01580 struct _secdef *sec=&pstl[index];
01581 struct _wcb * window=sec->sec_l_window;
01582 unsigned long vbn=sec->sec_l_vbn;
01583 struct _rde * rde= mmg_lookup_rde_va(address, current->pcb$l_phd, LOOKUP_RDE_EXACT, IPL$_ASTDEL);
01584 unsigned long offset;
01585 #else
01586 struct _secdef *sec;
01587 struct _wcb * window;
01588 unsigned long vbn;
01589 struct _rde * rde;
01590 unsigned long offset;
01591
01592 if (index>64) {
01593 printk("wrong %x %x %x %x\n",address,page,pte,*pte);
01594 die("Wrong\n",regs,error_code);
01595 panic("Wrong\n");
01596 }
01597 sec=&pstl[index];
01598 window=sec->sec_l_window;
01599 vbn=sec->sec_l_vbn;
01600 rde= mmg_lookup_rde_va(address, current->pcb$l_phd, LOOKUP_RDE_EXACT, IPL$_ASTDEL);
01601 #endif
01602 if (rde==0) printk("vma0 address %x\n",address);
01603
01604 offset=((address-(unsigned long)rde->rde_pq_start_va)>>PAGE_SHIFT)+(vbn>>3);
01605
01606
01607
01608
01609
01610 {
01611 pfn = mmg_ininewpfn(tsk,tsk->pcb$l_phd,page,pte);
01612 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
01613 mem_map[pfn].pfn_q_bak=*(unsigned long *)pte;
01614
01615 *(unsigned long *)pte=((unsigned long)(pfn<<PAGE_SHIFT))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY;
01616
01617 }
01618
01619 flush_tlb_range(tsk->mm, page, page + PAGE_SIZE);
01620 vmsunlock(&SPIN_MMG, -1);
01621 makereadast(window,pfn,address,pte,offset,error_code&2);
01622
01623 return;
01624 } else {
01625 extern int myswapfile;
01626 struct _pfl * pfl=myswapfile;
01627 struct _wcb * window=pfl->pfl_l_window;
01628 unsigned long vbn=mypte->pte_v_pgflpag;
01629 struct _rde * rde;
01630 unsigned long offset;
01631
01632 offset=vbn<<PAGE_SHIFT;
01633 {
01634 pfn = mmg_ininewpfn(tsk,tsk->pcb$l_phd,page,pte);
01635 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
01636 mem_map[pfn].pfn_q_bak=*(unsigned long *)pte;
01637
01638 *(unsigned long *)pte=((unsigned long)(pfn<<PAGE_SHIFT))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY;
01639 }
01640 flush_tlb_range(tsk->mm, page, page + PAGE_SIZE);
01641 printk("soon reading pfl_page %x %x %x %x\n",vbn,pte,*(long*)pte,page);
01642 vmsunlock(&SPIN_MMG, -1);
01643 makereadast(window,pfn,address,pte,offset,error_code&2);
01644 return;
01645 }
01646 }
01647
01648 if (!((*(unsigned long *)pte)&_PAGE_TYP1)) {
01649 if (!((*(unsigned long *)pte)&_PAGE_TYP0)) {
01650 if ((*(unsigned long *)pte)&0xffffffff) {
01651 long pfn=mypte->pte_v_pfn;
01652 if (pfn>=num_physpages) goto notyet;
01653 if (pte!=mem_map[pfn].pfn_q_pte_index) goto notyet;
01654 if ((*(unsigned long *)pte)&_PAGE_PRESENT) goto notyet;
01655 int loc=mem_map[pfn].pfn_v_loc;
01656 if (loc<=PFN_C_BADPAGLST) {
01657 #ifdef OLDINT
01658 mmg_rempfn(loc,&mem_map[pfn]);
01659 #else
01660 mmg_rempfn(loc,pfn);
01661 #endif
01662 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
01663 mmg_makewsle(tsk,tsk->pcb$l_phd,address,pte,pfn);
01664 *(unsigned long *)pte|=_PAGE_PRESENT;
01665 flush_tlb_range(current->mm, page, page + PAGE_SIZE);
01666 }
01667 if (loc==PFN_C_WRTINPROG ) {
01668
01669 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
01670 mmg_makewsle(tsk,tsk->pcb$l_phd,address,pte,pfn);
01671 *(unsigned long *)pte|=_PAGE_PRESENT;
01672 flush_tlb_range(current->mm, page, page + PAGE_SIZE);
01673 }
01674
01675 vmsunlock(&SPIN_MMG, -1);
01676 return;
01677 notyet:
01678 {}
01679 } else {
01680 {
01681 struct _rde * rde= mmg_lookup_rde_va(address, current->pcb$l_phd, LOOKUP_RDE_HIGHER, IPL$_ASTDEL);
01682 if (address<rde->rde_ps_start_va && address>=(rde->rde_ps_start_va-4*PAGE_SIZE)) {
01683 long page_size = rde->rde_ps_start_va - page;
01684 rde->rde_ps_start_va-=page_size;
01685 rde->rde_l_region_size+=page_size;
01686 }
01687 {
01688 pfn = mmg_ininewpfn(tsk,tsk->pcb$l_phd,page,pte);
01689 mem_map[pfn].pfn_v_loc=PFN$C_ACTIVE;
01690 mem_map[pfn].pfn_q_bak=*(unsigned long *)pte;
01691 mem_map[pfn].pfn_l_page_state|=PFN$M_MODIFY;
01692 *(unsigned long *)pte=((unsigned long)(pfn<<PAGE_SHIFT))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY;
01693 if (page==0) {
01694 printk("wrong %x %x %x %x\n",address,page,pte,*pte);
01695 die("Wrong\n",regs,error_code);
01696 panic("Wrong\n");
01697 }
01698 flush_tlb_range(tsk->mm, page, page + PAGE_SIZE);
01699 memset(page,0,PAGE_SIZE);
01700 if ((error_code&2)==0) {
01701 *(unsigned long *)pte=((unsigned long)(pfn<<PAGE_SHIFT))|_PAGE_NEWPAGE|_PAGE_PRESENT|_PAGE_USER|_PAGE_ACCESSED;
01702 flush_tlb_range(tsk->mm, page, page + PAGE_SIZE);
01703 }
01704 }
01705 }
01706 vmsunlock(&SPIN_MMG, -1);
01707 return;
01708 }
01709 } else {
01710 }
01711
01712
01713 }
01714
01715 again:
01716 down_read(&mm->mmap_sem);
01717
01718
01719 vma = mmg_lookup_rde_va(address, current->pcb$l_phd, LOOKUP_RDE_EXACT, IPL$_ASTDEL);
01720 if (!vma)
01721 goto bad_area;
01722 if (vma->rde_ps_start_va <= address)
01723 goto good_area;
01724 if (!(vma->rde_l_flags & VM_GROWSDOWN))
01725 goto bad_area;
01726 if (error_code & 4) {
01727
01728 if (address + 128 < regs->rsp)
01729 goto bad_area;
01730 }
01731 if (expand_stack2(vma, address))
01732 goto bad_area;
01733
01734
01735
01736
01737 good_area:
01738 info.si_code = SEGV_ACCERR;
01739 write = 0;
01740 switch (error_code & 3) {
01741 default:
01742
01743 case 2:
01744 if (!(vma->rde_l_flags & VM_WRITE))
01745 goto bad_area;
01746 write++;
01747 pfn=mypte->pte_v_pfn;
01748 mem_map[pfn].pfn_l_page_state=PFN$M_MODIFY;
01749 break;
01750 case 1:
01751 goto bad_area;
01752 case 0:
01753 if (!(vma->rde_l_flags & (VM_READ | VM_EXEC)))
01754 goto bad_area;
01755 }
01756
01757
01758
01759
01760
01761
01762
01763 switch (do_wp_page(mm, vma, address, pte, *pte)) {
01764 case 1:
01765 tsk->min_flt++;
01766 break;
01767 case 2:
01768 tsk->maj_flt++;
01769 break;
01770 case 0:
01771 goto do_sigbus;
01772 default:
01773 goto out_of_memory;
01774 }
01775
01776 up_read(&mm->mmap_sem);
01777 vmsunlock(&SPIN_MMG, -1);
01778 return;
01779
01780
01781
01782
01783
01784 bad_area:
01785 up_read(&mm->mmap_sem);
01786
01787 bad_area_nosemaphore:
01788
01789 if (error_code & 4) {
01790 if (is_prefetch(regs, address))
01791 {
01792 vmsunlock(&SPIN_MMG, -1);
01793 return;
01794 }
01795
01796 if (exception_trace && !(tsk->ptrace & PT_PTRACED) &&
01797 (tsk->sig->action[SIGSEGV-1].sa.sa_handler == SIG_IGN ||
01798 (tsk->sig->action[SIGSEGV-1].sa.sa_handler == SIG_DFL)))
01799 printk(KERN_INFO
01800 "%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
01801 tsk->pcb_t_lname, tsk->pcb$l_pid, address, regs->rip,
01802 regs->rsp, error_code);
01803
01804 tsk->thread.cr2 = address;
01805
01806 tsk->thread.error_code = error_code | (address >= TASK_SIZE);
01807 tsk->thread.trap_no = 14;
01808 info.si_signo = SIGSEGV;
01809 info.si_errno = 0;
01810
01811 info.si_addr = (void *)address;
01812 force_sig_info(SIGSEGV, &info, tsk);
01813 vmsunlock(&SPIN_MMG, -1);
01814 return;
01815 }
01816
01817 no_context:
01818
01819
01820 if ((fixup = search_exception_table(regs->rip)) != 0) {
01821 regs->rip = fixup;
01822 if (0 && exception_trace)
01823 printk(KERN_ERR
01824 "%s: fixed kernel exception at %lx address %lx err:%ld\n",
01825 tsk->pcb_t_lname, regs->rip, address, error_code);
01826 vmsunlock(&SPIN_MMG, -1);
01827 return;
01828 }
01829
01830 if (is_prefetch(regs, address))
01831 {
01832 vmsunlock(&SPIN_MMG, -1);
01833 return;
01834 }
01835
01836
01837
01838
01839
01840
01841 unsigned long flags;
01842 prepare_die(&flags);
01843 if (address < PAGE_SIZE)
01844 printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
01845 else
01846 printk(KERN_ALERT "Unable to handle kernel paging request");
01847 printk(KERN_ALERT " at %016lx RIP: ", address);
01848 printk_address(regs->rip);
01849 dump_pagetable(address);
01850 __die("Oops", regs, error_code);
01851
01852 printk(KERN_EMERG "CR2: %016lx\n", address);
01853 exit_die(flags);
01854 do_exit(SIGKILL);
01855
01856
01857
01858
01859
01860 out_of_memory:
01861 up_read(&mm->mmap_sem);
01862 if (current->pcb_l_pid == 1) {
01863 #if 0
01864 tsk->policy |= SCHED_YIELD;
01865 #endif
01866 schedule();
01867 goto again;
01868 }
01869 printk("VM: killing process %s\n", tsk->pcb_t_lname);
01870 if (error_code & 4)
01871 do_exit(SIGKILL);
01872 goto no_context;
01873
01874 do_sigbus:
01875 up_read(&mm->mmap_sem);
01876
01877
01878 if (!(error_code & 4))
01879 goto no_context;
01880
01881 if (is_prefetch(regs, address))
01882 {
01883 vmsunlock(&SPIN_MMG, -1);
01884
01885 return;
01886 }
01887
01888 tsk->thread.cr2 = address;
01889 tsk->thread.error_code = error_code;
01890 tsk->thread.trap_no = 14;
01891 info.si_signo = SIGBUS;
01892 info.si_errno = 0;
01893 info.si_code = BUS_ADRERR;
01894 info.si_addr = (void *)address;
01895 force_sig_info(SIGBUS, &info, tsk);
01896 vmsunlock(&SPIN_MMG, -1);
01897 return;
01898
01899
01900 vmalloc_fault:
01901 {
01902 pgd_t *pgd;
01903 pud_t *pud;
01904 pmd_t *pmd;
01905 pte_t *pte;
01906
01907
01908
01909
01910
01911
01912
01913
01914 #if 0
01915 printk("vmalloc fault %lx index %lu\n",address,pml4_index(address));
01916 dump_pagetable(address);
01917 #endif
01918
01919 pgd = pgd_offset_k(address);
01920 #if 0
01921 if (pgd != current_pgd_offset_k(address))
01922 goto bad_area_nosemaphore;
01923 #endif
01924 if (!pgd_present(*pgd))
01925 goto bad_area_nosemaphore;
01926 pud = pud_offset(pgd, address);
01927 if (!pud_present(*pud))
01928 goto bad_area_nosemaphore;
01929 pmd = pmd_offset(pud, address);
01930 if (!pmd_present(*pmd))
01931 goto bad_area_nosemaphore;
01932 pte = pte_offset(pmd, address);
01933 if (!pte_present(*pte))
01934 goto bad_area_nosemaphore;
01935
01936 __flush_tlb_all();
01937 vmsunlock(&SPIN_MMG, -1);
01938 return;
01939 }
01940 }
01941 #endif
01942 #endif
01943