00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
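/*
 * Core virtual-memory routines: page-table allocation and teardown,
 * fork-time page copying (including copy-on-write setup), range
 * zapping/zero-mapping/remapping, kiobuf pinning and the write-protect
 * fault handler.  The code follows the structure of Linux mm/memory.c
 * but operates on VMS-style structures (RDE region descriptors and the
 * PFN database) pulled in via the VMS headers below.
 */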
00045 #include <linux/config.h>
00046 #include <linux/mm.h>
00047 #include <linux/mman.h>
00048 #include <linux/swap.h>
00049 #include <linux/smp_lock.h>
00050 #include <linux/swapctl.h>
00051 #include <linux/iobuf.h>
00052 #include <linux/highmem.h>
00053 #include <linux/pagemap.h>
00054
00055 #include <asm/pgalloc.h>
00056 #include <asm/uaccess.h>
00057 #include <asm/tlb.h>
00058 #include <asm/pgtable.h>
00059
00060 #include <ipldef.h>
00061 #include <phddef.h>
00062 #include <rdedef.h>
00063 #include <va_rangedef.h>
00064 #include <vmspte.h>
00065 #include <wsldef.h>
00066 #include <exe_routines.h>
00067 #include <misc_routines.h>
00068 #include <mmg_routines.h>
00069 #include <fcbdef.h>
00070
00071 pgprot_t x_to_prot(int x) {
00072 pgprot_t y;
00073 y.pgprot=x;
00074 return y;
00075 }
00076
00077 unsigned long max_mapnr;
00078 unsigned long num_physpages;
00079 void * high_memory;
00080 struct page *highmem_start_page;
00081
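/*
 * find_vma2() is an unimplemented stub: nothing in this file should
 * reach it, so it simply panics if called.
 */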
00082 struct vm_area_struct * find_vma2(struct mm_struct * mm, unsigned long addr) {
00083 panic("findvma\n");
00084 }
00085
00086
00087
00088
00089
00090
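/*
 * Copy a page for copy-on-write.  If the source is the zero page we
 * can just clear the destination instead of copying.
 */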
00091 static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
00092 {
00093 if (from == ZERO_PAGE(address)) {
00094 clear_user_highpage(to, address);
00095 return;
00096 }
00097 copy_user_highpage(to, from, address);
00098 }
00099
00100 mem_map_t * mem_map;
00101
00102
00103
00104
00105
00106
00107
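/*
 * Report and clear a corrupted entry at each page-table level.  Used
 * by the *_none_or_clear_bad() checks in the walkers below.
 */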
00108 void pgd_clear_bad(pgd_t *pgd)
00109 {
00110 pgd_ERROR(*pgd);
00111 pgd_clear(pgd);
00112 }
00113
00114 void pud_clear_bad(pud_t *pud)
00115 {
00116 pud_ERROR(*pud);
00117 pud_clear(pud);
00118 }
00119
00120 void pmd_clear_bad(pmd_t *pmd)
00121 {
00122 pmd_ERROR(*pmd);
00123 pmd_clear(pmd);
00124 }
00125
00126 #if 0
00127
00128
00129
00130
00131 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
00132 {
00133 struct page *page = pmd_page(*pmd);
00134 pmd_clear(pmd);
00135 pte_lock_deinit(page);
00136 pte_free_tlb(tlb, page);
00137 dec_page_state(nr_page_table_pages);
00138 tlb->mm->nr_ptes--;
00139 }
00140
00141 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
00142 unsigned long addr, unsigned long end,
00143 unsigned long floor, unsigned long ceiling)
00144 {
00145 pmd_t *pmd;
00146 unsigned long next;
00147 unsigned long start;
00148
00149 start = addr;
00150 pmd = pmd_offset(pud, addr);
00151 do {
00152 next = pmd_addr_end(addr, end);
00153 if (pmd_none_or_clear_bad(pmd))
00154 continue;
00155 free_pte_range(tlb, pmd);
00156 } while (pmd++, addr = next, addr != end);
00157
00158 start &= PUD_MASK;
00159 if (start < floor)
00160 return;
00161 if (ceiling) {
00162 ceiling &= PUD_MASK;
00163 if (!ceiling)
00164 return;
00165 }
00166 if (end - 1 > ceiling - 1)
00167 return;
00168
00169 pmd = pmd_offset(pud, start);
00170 pud_clear(pud);
00171 pmd_free_tlb(tlb, pmd);
00172 }
00173
00174 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
00175 unsigned long addr, unsigned long end,
00176 unsigned long floor, unsigned long ceiling)
00177 {
00178 pud_t *pud;
00179 unsigned long next;
00180 unsigned long start;
00181
00182 start = addr;
00183 pud = pud_offset(pgd, addr);
00184 do {
00185 next = pud_addr_end(addr, end);
00186 if (pud_none_or_clear_bad(pud))
00187 continue;
00188 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
00189 } while (pud++, addr = next, addr != end);
00190
00191 start &= PGDIR_MASK;
00192 if (start < floor)
00193 return;
00194 if (ceiling) {
00195 ceiling &= PGDIR_MASK;
00196 if (!ceiling)
00197 return;
00198 }
00199 if (end - 1 > ceiling - 1)
00200 return;
00201
00202 pud = pud_offset(pgd, start);
00203 pgd_clear(pgd);
00204 pud_free_tlb(tlb, pud);
00205 }
00206
00207
00208
00209
00210
00211
00212 void free_pgd_range(struct mmu_gather **tlb,
00213 unsigned long addr, unsigned long end,
00214 unsigned long floor, unsigned long ceiling)
00215 {
00216 pgd_t *pgd;
00217 unsigned long next;
00218 unsigned long start;
00219
00220
00221
00222
00223
00224
00225
00226
00227
00228
00229
00230
00231
00232
00233
00234
00235
00236
00237
00238
00239
00240
00241
00242
00243
00244
00245
00246 addr &= PMD_MASK;
00247 if (addr < floor) {
00248 addr += PMD_SIZE;
00249 if (!addr)
00250 return;
00251 }
00252 if (ceiling) {
00253 ceiling &= PMD_MASK;
00254 if (!ceiling)
00255 return;
00256 }
00257 if (end - 1 > ceiling - 1)
00258 end -= PMD_SIZE;
00259 if (addr > end - 1)
00260 return;
00261
00262 start = addr;
00263 pgd = pgd_offset((*tlb)->mm, addr);
00264 do {
00265 next = pgd_addr_end(addr, end);
00266 if (pgd_none_or_clear_bad(pgd))
00267 continue;
00268 free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
00269 } while (pgd++, addr = next, addr != end);
00270
00271 if (!(*tlb)->fullmm)
00272 flush_tlb_pgtables((*tlb)->mm, start, end);
00273 }
00274
00275 void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
00276 unsigned long floor, unsigned long ceiling)
00277 {
00278 while (vma) {
00279 struct vm_area_struct *next = vma->rde_ps_va_list_flink;
00280 unsigned long addr = vma->rde_pq_start_va;
00281
00282
00283
00284
00285 anon_vma_unlink(vma);
00286 unlink_file_vma(vma);
00287
00288 if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
00289 hugetlb_free_pgd_range(tlb, addr, vma->rde_pq_start_va + vma->rde_q_region_size,
00290 floor, next? next->rde_pq_start_va: ceiling);
00291 } else {
00292
00293
00294
00295 while (next && next->rde_pq_start_va <= (vma->rde_pq_start_va + vma->rde_q_region_size) + PMD_SIZE
00296 && !is_hugepage_only_range(vma->vm_mm, next->rde_pq_start_va,
00297 HPAGE_SIZE)) {
00298 vma = next;
00299 next = vma->rde_ps_va_list_flink;
00300 anon_vma_unlink(vma);
00301 unlink_file_vma(vma);
00302 }
00303 free_pgd_range(tlb, addr, vma->rde_pq_start_va + vma->rde_q_region_size,
00304 floor, next? next->rde_pq_start_va: ceiling);
00305 }
00306 vma = next;
00307 }
00308 }
00309 #endif
00310
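/*
 * Allocate a pte page for a user pmd.  The allocation is done without
 * the page-table lock held; if another thread populated the pmd in the
 * meantime, the fresh page is thrown away.
 */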
00311 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
00312 {
00313 struct page *new = pte_alloc_one(mm, address);
00314 if (!new)
00315 return -ENOMEM;
00316
00317 pte_lock_init(new);
00318 spin_lock(&mm->page_table_lock);
00319 if (pmd_present(*pmd)) {
00320 pte_lock_deinit(new);
00321 pte_free(new);
00322 } else {
00323 #if 0
00324 mm->nr_ptes++;
00325 inc_page_state(nr_page_table_pages);
00326 #endif
00327 pmd_populate(mm, pmd, new);
00328 }
00329 spin_unlock(&mm->page_table_lock);
00330 return 0;
00331 }
00332
00333 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
00334 {
00335 #ifdef __x86_64__
00336 pte_t *new = pte_alloc_one_kernel(&init_mm, address);
00337 #else
00338 pte_t *new = pte_alloc_one(&init_mm, address);
00339 #endif
00340 if (!new)
00341 return -ENOMEM;
00342
00343 spin_lock(&init_mm.page_table_lock);
00344 if (pmd_present(*pmd))
00345 pte_free_kernel(new);
00346 else
00347 pmd_populate_kernel(&init_mm, pmd, new);
00348 spin_unlock(&init_mm.page_table_lock);
00349 return 0;
00350 }
00351
00352
00353
00354
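/*
 * A pte is being dropped: propagate its dirty bit to the struct page
 * so the dirty state is not lost.  Invalid and reserved pages are
 * ignored.
 */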
00355 void __free_pte(pte_t pte)
00356 {
00357 void fastcall set_page_dirty(struct page *page);
00358 struct page *page = pte_page(pte);
00359 if ((!VALID_PAGE(page)) || PageReserved(page))
00360 return;
00361 if (pte_dirty(pte))
00362 set_page_dirty(page);
00363
00364 }
00365
00366
00367
00368
00369
00370
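/*
 * 2.4-style teardown helpers: free the pte/pmd/pud tables hanging off
 * one entry of the next level up.  Called from clear_page_tables().
 */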
00371 static inline void free_one_pmd(pmd_t * dir)
00372 {
00373 pte_t * pte;
00374
00375 if (pmd_none(*dir))
00376 return;
00377 if (pmd_bad(*dir)) {
00378 pmd_ERROR(*dir);
00379 pmd_clear(dir);
00380 return;
00381 }
00382 pte = pte_offset(dir, 0);
00383 pmd_clear(dir);
00384 pte_free(pte);
00385 }
00386
00387 static inline void free_one_pud(pud_t * dir)
00388 {
00389 pmd_t * pmd;
00390
00391 if (pud_none(*dir))
00392 return;
00393 if (pud_bad(*dir)) {
00394 pud_ERROR(*dir);
00395 pud_clear(dir);
00396 return;
00397 }
00398 pmd = pmd_offset(dir, 0);
00399 pud_clear(dir);
00400 pmd_free(pmd);
00401 }
00402
00403 static inline void free_one_pgd(pgd_t * dir)
00404 {
00405 int j;
00406 pud_t * pud;
00407
00408 if (pgd_none(*dir))
00409 return;
00410 if (pgd_bad(*dir)) {
00411 pgd_ERROR(*dir);
00412 pgd_clear(dir);
00413 return;
00414 }
00415 pud = pud_offset(dir, 0);
00416 pgd_clear(dir);
00417 for (j = 0; j < PTRS_PER_PUD ; j++) {
00418 prefetchw(pud+j+(PREFETCH_STRIDE/16));
00419 free_one_pud(pud+j);
00420 }
00421 pud_free(pud);
00422 }
00423
00424
00425
00426
00427 int pgt_cache_water[2] = { 25, 50 };
00428
00429
00430 int check_pgt_cache(void)
00431 {
00432 return do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
00433 }
00434
00435
00436
00437
00438
00439
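/*
 * Remove all page tables rooted at 'nr' consecutive page-directory
 * entries starting at 'first', then let the page-table cache shrink
 * back below its watermarks.
 */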
00440 void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
00441 {
00442 pgd_t * page_dir = mm->pgd;
00443
00444 spin_lock(&mm->page_table_lock);
00445 page_dir += first;
00446 do {
00447 free_one_pgd(page_dir);
00448 page_dir++;
00449 } while (--nr);
00450 spin_unlock(&mm->page_table_lock);
00451
00452
00453 check_pgt_cache();
00454 }
00455
00456 #define PTE_TABLE_MASK ((PTRS_PER_PTE-1) * sizeof(pte_t))
00457 #define PMD_TABLE_MASK ((PTRS_PER_PMD-1) * sizeof(pmd_t))
00458 #define PUD_TABLE_MASK ((PTRS_PER_PUD-1) * sizeof(pud_t))
00459
00460 static inline int is_cow_mapping(unsigned int flags)
00461 {
00462 return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
00463 }
00464
00465
00466
00467
00468
00469
00470
00471
00472
00473
00474
00475
00476
00477
00478
00479
00480
00481
00482
00483
00484
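/*
 * vm_normal_page -- return the struct page backing a pte, or NULL when
 * there is no usable struct page.  For VM_PFNMAP regions, a pfn that
 * matches the linear region offset (vm_pgoff is taken as 0 here), or a
 * non-COW mapping, yields NULL; the pfn_valid() sanity check is
 * currently compiled out.
 */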
00485 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
00486 {
00487 unsigned long pfn = pte_pfn(pte);
00488
00489 if (vma->rde_l_flags & VM_PFNMAP) {
00490 unsigned long off = (addr - ((long)vma->rde_pq_start_va)) >> PAGE_SHIFT;
00491 if (pfn == 0 + off)
00492 return NULL;
00493 if (!is_cow_mapping(vma->rde_l_flags))
00494 return NULL;
00495 }
00496
00497
00498
00499
00500
00501
00502
00503
00504
00505 #if 0
00506 if (unlikely(!pfn_valid(pfn))) {
00507 print_bad_pte(vma, pte, addr);
00508 return NULL;
00509 }
00510 #endif
00511
00512
00513
00514
00515
00516
00517
00518
00519 return pfn_to_page(pfn);
00520 }
00521
00522
00523
00524
00525
00526
00527
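/*
 * Copy one pte from the parent to the child at fork time.  Non-present
 * (swap/file) entries are copied as-is; for private writable mappings
 * the parent pte is write-protected first so both sides fault for COW.
 * Otherwise the shared page just gains a reference, except for VMS
 * 'TYP0' pages, which are duplicated eagerly (see the block below).
 */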
00528 static inline void
00529 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
00530 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
00531 unsigned long addr, int *rss)
00532 {
00533 unsigned long vm_flags = ((struct _rde *)vma)->rde_l_flags;
00534 pte_t pte = *src_pte;
00535 struct page *page;
00536 unsigned long did_cow=0;
00537 struct page *ptepage = pte_page(pte);
00538 unsigned long address = addr;
00539
00540
00541 if (unlikely(!pte_present(pte))) {
00542 if (!pte_file(pte)) {
00543 #if 0
00544 swap_duplicate(pte_to_swp_entry(pte));
00545 #endif
00546
00547 if (unlikely(list_empty(&dst_mm->mmlist))) {
00548 spin_lock(&mmlist_lock);
00549 if (list_empty(&dst_mm->mmlist))
00550 list_add(&dst_mm->mmlist,
00551 &src_mm->mmlist);
00552 spin_unlock(&mmlist_lock);
00553 }
00554 }
00555 goto out_set_pte;
00556 }
00557
00558
00559
00560
00561
00562 if (is_cow_mapping(vm_flags)) {
00563 ptep_set_wrprotect(src_mm, addr, src_pte);
00564 pte = *src_pte;
00565 did_cow = 1;
00566 }
00567
00568
00569
00570
00571
00572 if (vm_flags & VM_SHARED)
00573 pte = pte_mkclean(pte);
00574 pte = pte_mkold(pte);
00575
00576 page = vm_normal_page(vma, addr, pte);
00577 if (page) {
00578
00579 #if 0
00580 page_dup_rmap(page);
00581 rss[!!PageAnon(page)]++;
00582 #endif
00583 ptepage->pfn_l_refcnt++;
00584 }
00585
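/*
 * Eager copy for pages whose backing-store field (pfn_q_bak) has
 * PTE$M_TYP0 set and that were not set up for COW above: allocate a
 * fresh physical page, clone the PFN database entry and the page
 * contents, and point the child's pte at the new frame while keeping
 * the low protection bits from the parent's pte.
 */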
00586 if (did_cow==0 && (ptepage->pfn_q_bak&PTE$M_TYP0)) {
00587 pte_t * mypte=&pte;
00588 signed long page = mmg_allocpfn();
00589 unsigned long address2 = (*(unsigned long *)src_pte)&0xfffff000;
00590
00591 ptepage->pfn_l_refcnt--;
00592 #if 0
00593 #ifdef __i386__
00594
00595 ptepage->pfn_l_pt_pfn=address2;
00596 #endif
00597 #endif
00598 mem_map[page]=*ptepage;
00599 *(unsigned long *)mypte=((unsigned long)(page*PAGE_SIZE))|((*(unsigned long *)mypte)&0xfff);
00600 memcpy(__va(page*PAGE_SIZE),__va(address2),PAGE_SIZE);
00601 } else {
00602 static int mydebugg = 0;
00603 if (mydebugg) {
00604 printk("%x %x %x %x\n",address,*dst_pte,*src_pte,src_pte);
00605 }
00606 }
00607
00608 out_set_pte:
00609 set_pte_at(dst_mm, addr, dst_pte, pte);
00610 }
00611
00612 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
00613 pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
00614 unsigned long addr, unsigned long end)
00615 {
00616 pte_t *src_pte, *dst_pte;
00617 #if 0
00618 spinlock_t *src_ptl, *dst_ptl;
00619 #endif
00620 int progress = 0;
00621 int rss[2];
00622
00623 again:
00624 rss[1] = rss[0] = 0;
00625 #if 0
00626 dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
00627 #else
00628 dst_pte = pte_alloc(dst_mm, dst_pmd, addr);
00629 #endif
00630 if (!dst_pte)
00631 return -ENOMEM;
00632 #if 0
00633 src_pte = pte_offset_map_nested(src_pmd, addr);
00634 src_ptl = pte_lockptr(src_mm, src_pmd);
00635 spin_lock(src_ptl);
00636 #else
00637 src_pte = pte_offset(src_pmd, addr);
00638 #endif
00639
00640 do {
00641
00642
00643
00644
00645 if (progress >= 32) {
00646 progress = 0;
00647 #if 0
00648 if (need_resched() ||
00649 need_lockbreak(src_ptl) ||
00650 need_lockbreak(dst_ptl))
00651 break;
00652 #endif
00653 }
00654 if (pte_none(*src_pte)) {
00655 progress++;
00656 continue;
00657 }
00658 copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
00659 progress += 8;
00660 } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
00661
00662 #if 0
00663 spin_unlock(src_ptl);
00664 #endif
00665 pte_unmap_nested(src_pte - 1);
00666 #if 0
00667 add_mm_rss(dst_mm, rss[0], rss[1]);
00668 #endif
00669 #if 0
00670 pte_unmap_unlock(dst_pte - 1, dst_ptl);
00671 #endif
00672 #if 0
00673 cond_resched();
00674 #endif
00675 if (addr != end)
00676 goto again;
00677 return 0;
00678 }
00679
00680 static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
00681 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
00682 unsigned long addr, unsigned long end)
00683 {
00684 pmd_t *src_pmd, *dst_pmd;
00685 unsigned long next;
00686
00687 dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
00688 if (!dst_pmd)
00689 return -ENOMEM;
00690 src_pmd = pmd_offset(src_pud, addr);
00691 do {
00692 next = pmd_addr_end(addr, end);
00693 if (pmd_none_or_clear_bad(src_pmd))
00694 continue;
00695 if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
00696 vma, addr, next))
00697 return -ENOMEM;
00698 } while (dst_pmd++, src_pmd++, addr = next, addr != end);
00699 return 0;
00700 }
00701
00702 static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
00703 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
00704 unsigned long addr, unsigned long end)
00705 {
00706 pud_t *src_pud, *dst_pud;
00707 unsigned long next;
00708
00709 dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
00710 if (!dst_pud)
00711 return -ENOMEM;
00712 src_pud = pud_offset(src_pgd, addr);
00713 do {
00714 next = pud_addr_end(addr, end);
00715 if (pud_none_or_clear_bad(src_pud))
00716 continue;
00717 if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
00718 vma, addr, next))
00719 return -ENOMEM;
00720 } while (dst_pud++, src_pud++, addr = next, addr != end);
00721 return 0;
00722 }
00723
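/*
 * copy_page_range -- duplicate one region's page tables from the
 * parent into the child at fork time, walking pgd -> pud -> pmd -> pte.
 * The hugetlb and anon_vma short-cuts are compiled out.
 */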
00724 int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
00725 struct vm_area_struct *vma)
00726 {
00727 pgd_t *src_pgd, *dst_pgd;
00728 unsigned long next;
00729 unsigned long addr = ((struct _rde *)vma)->rde_pq_start_va;
00730 unsigned long end = ((struct _rde *)vma)->rde_pq_start_va + ((struct _rde *)vma)->rde_q_region_size;
00731
00732
00733
00734
00735
00736
00737
00738 #if 0
00739
00740 if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
00741 if (!vma->anon_vma)
00742 return 0;
00743 }
00744 #endif
00745
00746 #if 0
00747
00748 if (is_vm_hugetlb_page(vma))
00749 return copy_hugetlb_page_range(dst_mm, src_mm, vma);
00750 #endif
00751
00752 dst_pgd = pgd_offset(dst_mm, addr);
00753 src_pgd = pgd_offset(src_mm, addr);
00754 do {
00755 next = pgd_addr_end(addr, end);
00756 if (pgd_none_or_clear_bad(src_pgd))
00757 continue;
00758 if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
00759 vma, addr, next))
00760 return -ENOMEM;
00761 } while (dst_pgd++, src_pgd++, addr = next, addr != end);
00762 return 0;
00763 }
00764
00765 #if 0
00766
00767
00768
00769
00770
00771
00772
00773
00774
00775
00776
00777
00778 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
00779 struct vm_area_struct *vma)
00780 {
00781 pgd_t * src_pgd, * dst_pgd;
00782 unsigned long address = ((struct _rde *)vma)->rde_pq_start_va;
00783 unsigned long end = ((struct _rde *)vma)->rde_pq_start_va + ((struct _rde *)vma)->rde_q_region_size;
00784 unsigned long cow = (((struct _rde *)vma)->rde_l_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
00785 unsigned long did_cow=0;
00786
00787 src_pgd = pgd_offset(src, address)-1;
00788 dst_pgd = pgd_offset(dst, address)-1;
00789
00790 for (;;) {
00791 pud_t * src_pud, * dst_pud;
00792
00793 src_pgd++; dst_pgd++;
00794
00795
00796
00797 if (pgd_none(*src_pgd))
00798 goto skip_copy_pud_range;
00799 if (pgd_bad(*src_pgd)) {
00800 pgd_ERROR(*src_pgd);
00801 pgd_clear(src_pgd);
00802 skip_copy_pud_range: address = (address + PGDIR_SIZE) & PGDIR_MASK;
00803 if (!address || (address >= end))
00804 goto out;
00805 continue;
00806 }
00807
00808 src_pud = pud_offset(src_pgd, address);
00809 dst_pud = pud_alloc(dst, dst_pgd, address);
00810 if (!dst_pud)
00811 goto nomem;
00812
00813 do {
00814 pte_t * src_pte, * dst_pte;
00815
00816
00817
00818 if (pud_none(*src_pud))
00819 goto skip_copy_pte_range;
00820 if (pud_bad(*src_pud)) {
00821 pud_ERROR(*src_pud);
00822 pud_clear(src_pud);
00823 skip_copy_pte_range: address = (address + PUD_SIZE) & PUD_MASK;
00824 if (address >= end)
00825 goto out;
00826 goto cont_copy_pud_range;
00827 }
00828
00829 src_pte = pte_offset(src_pmd, address);
00830 dst_pte = pte_alloc(dst, dst_pmd, address);
00831 if (!dst_pte)
00832 goto nomem;
00833
00834 spin_lock(&src->page_table_lock);
00835 do {
00836 pte_t pte = *src_pte;
00837 struct page *ptepage;
00838
00839
00840
00841 if (pte_none(pte))
00842 goto cont_copy_pte_range_noset;
00843 if (!pte_present(pte)) {
00844 #if 0
00845 swap_duplicate(pte_to_swp_entry(pte));
00846 #endif
00847 goto cont_copy_pte_range;
00848 }
00849 ptepage = pte_page(pte);
00850 if ((!VALID_PAGE(ptepage)) ||
00851 PageReserved(ptepage))
00852 goto cont_copy_pte_range;
00853
00854
00855 if (cow && pte_write(pte)) {
00856 ptep_set_wrprotect(src_pte);
00857 pte = *src_pte;
00858 did_cow=1;
00859 if (0) {
00860 pte_t * mypte=&pte;
00861 signed long page = mmg_allocpfn();
00862 unsigned long address2 = (*(unsigned long *)src_pte)&0xfffff000;
00863 #if 0
00864 mem_map[page].virtual=__va(page*PAGE_SIZE);
00865 #endif
00866 *(unsigned long *)mypte=(__va(page*PAGE_SIZE));
00867 *(unsigned long *)mypte|=_PAGE_PRESENT;
00868 *(unsigned long *)mypte|=_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY;
00869
00870 memcpy(__va(page*PAGE_SIZE),address2,PAGE_SIZE);
00871
00872
00873 }
00874 } else {
00875 did_cow=0;
00876 }
00877
00878
00879 if (((struct _rde *)vma)->rde_l_flags & VM_SHARED)
00880 pte = pte_mkclean(pte);
00881 pte = pte_mkold(pte);
00882
00883 ptepage->pfn_l_refcnt++;
00884 dst->rss++;
00885
00886 if (did_cow==0 && (ptepage->pfn_q_bak&PTE$M_TYP0)) {
00887 pte_t * mypte=&pte;
00888 signed long page = mmg_allocpfn();
00889 unsigned long address2 = (*(unsigned long *)src_pte)&0xfffff000;
00890 #if 0
00891 struct _wsl * wsl = srcphd->phd_l_wslist;
00892 struct _wsl * wsle = &wsl[ptepage->pfn_l_wslx_qw];
00893 #endif
00894
00895 ptepage->pfn_l_refcnt--;
00896 #if 0
00897 #ifdef __i386__
00898
00899 ptepage->pfn_l_pt_pfn=address2;
00900 #endif
00901 #endif
00902 mem_map[page]=*ptepage;
00903 #if 0
00904 wsle->wsl_v_valid=1;
00905 wsle->wsl_v_pagtyp=mem_map[page].pfn$v_pagtyp;
00906 mem_map[page].virtual=__va(page*PAGE_SIZE);
00907 ((unsigned long)wsle->wsl_pq_va)|=(unsigned long)mem_map[page].virtual;
00908 #endif
00909 #ifdef __arch_um__
00910 *(unsigned long *)mypte=((unsigned long)(__va(page*PAGE_SIZE)))|((*(unsigned long *)mypte)&0xfff);
00911 #else
00912 *(unsigned long *)mypte=((unsigned long)(page*PAGE_SIZE))|((*(unsigned long *)mypte)&0xfff);
00913 #endif
00914
00915
00916 #ifdef __arch_um__
00917
00918 #endif
00919 if (0 && __va(page*PAGE_SIZE)>0xc0500000) goto there;
00920 if (0 && ( address2<0x100000 || __va(page*PAGE_SIZE)<0x100000)) {
00921 there:
00922 printk("%x %x %x %x %x\n",address2,page,__va(page*PAGE_SIZE),src_pte,*src_pte);
00923 #ifdef __arch_i386__
00924 sickinsque(4,4);
00925 #endif
00926 panic("die is cast\n");
00927 }
00928 #ifdef __arch_um__
00929 memcpy(__va(page*PAGE_SIZE),address2,PAGE_SIZE);
00930 #else
00931 memcpy(__va(page*PAGE_SIZE),__va(address2),PAGE_SIZE);
00932 #endif
00933 } else {
00934 static int mydebugg = 0;
00935 if (mydebugg) {
00936 printk("%x %x %x %x\n",address,*dst_pte,*src_pte,src_pte);
00937 }
00938 }
00939
00940 cont_copy_pte_range: set_pte(dst_pte, pte);
00941 if (0) {
00942 unsigned long * l=dst_pte;
00943 *l&=0xfffff000;
00944 *l|=((*(unsigned long *)src_pte)&0xfff);
00945 }
00946 cont_copy_pte_range_noset: address += PAGE_SIZE;
00947 if (address >= end)
00948 goto out_unlock;
00949 src_pte++;
00950 dst_pte++;
00951 } while ((unsigned long)src_pte & PTE_TABLE_MASK);
00952 spin_unlock(&src->page_table_lock);
00953
00954 cont_copy_pmd_range: src_pmd++;
00955 dst_pmd++;
00956 } while ((unsigned long)src_pmd & PMD_TABLE_MASK);
00957 }
00958 out_unlock:
00959 spin_unlock(&src->page_table_lock);
00960 out:
00961 return 0;
00962 nomem:
00963 return -ENOMEM;
00964 }
00965 #endif
00966
00967
00968
00969
00970 static inline void forget_pte(pte_t page)
00971 {
00972 if (!pte_none(page)) {
00973 printk("forget_pte: old mapping existed!\n");
00974 BUG();
00975 }
00976 }
00977
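/*
 * Unmap one pte table's worth of a range: present ptes are handed to
 * the TLB gather (tlb_remove_page) and counted as freed, non-present
 * entries are simply cleared (the swap-cache release is compiled out).
 */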
00978 static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, unsigned long size)
00979 {
00980 unsigned long offset;
00981 pte_t * ptep;
00982 int freed = 0;
00983
00984 if (pmd_none(*pmd))
00985 return 0;
00986 if (pmd_bad(*pmd)) {
00987 pmd_ERROR(*pmd);
00988 pmd_clear(pmd);
00989 return 0;
00990 }
00991 ptep = pte_offset(pmd, address);
00992 offset = address & ~PMD_MASK;
00993 if (offset + size > PMD_SIZE)
00994 size = PMD_SIZE - offset;
00995 size &= PAGE_MASK;
00996 for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
00997 pte_t pte = *ptep;
00998 if (pte_none(pte))
00999 continue;
01000 if (pte_present(pte)) {
01001 struct page *page = pte_page(pte);
01002 if (VALID_PAGE(page) && !PageReserved(page))
01003 freed ++;
01004
01005 tlb_remove_page(tlb, ptep, address + offset);
01006 } else {
01007 #if 0
01008 free_swap_and_cache(pte_to_swp_entry(pte));
01009 #endif
01010 pte_clear(tlb->mm, address + offset, ptep);
01011 }
01012 }
01013
01014 return freed;
01015 }
01016
01017 static inline int zap_pmd_range(mmu_gather_t *tlb, pud_t * dir, unsigned long address, unsigned long size)
01018 {
01019 pmd_t * pmd;
01020 unsigned long end;
01021 int freed;
01022
01023 if (pud_none(*dir))
01024 return 0;
01025 if (pud_bad(*dir)) {
01026 pud_ERROR(*dir);
01027 pud_clear(dir);
01028 return 0;
01029 }
01030 pmd = pmd_offset(dir, address);
01031 end = address + size;
01032 if (end > ((address + PUD_SIZE) & PUD_MASK))
01033 end = ((address + PUD_SIZE) & PUD_MASK);
01034 freed = 0;
01035 do {
01036 freed += zap_pte_range(tlb, pmd, address, end - address);
01037 address = (address + PMD_SIZE) & PMD_MASK;
01038 pmd++;
01039 } while (address < end);
01040 return freed;
01041 }
01042
01043 static inline int zap_pud_range(mmu_gather_t *tlb, pgd_t * dir, unsigned long address, unsigned long size)
01044 {
01045 pud_t * pud;
01046 unsigned long end;
01047 int freed;
01048
01049 if (pgd_none(*dir))
01050 return 0;
01051 if (pgd_bad(*dir)) {
01052 pgd_ERROR(*dir);
01053 pgd_clear(dir);
01054 return 0;
01055 }
01056 pud = pud_offset(dir, address);
01057 end = address + size;
01058 if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
01059 end = ((address + PGDIR_SIZE) & PGDIR_MASK);
01060 freed = 0;
01061 do {
01062 freed += zap_pmd_range(tlb, pud, address, end - address);
01063 address = (address + PUD_SIZE) & PUD_MASK;
01064 pud++;
01065 } while (address < end);
01066 return freed;
01067 }
01068
01069
01070
01071
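/*
 * zap_page_range -- remove user mappings in [address, address+size).
 * The range is first handed to the VMS side via exe_deltva(), then the
 * page tables are walked and torn down under mm->page_table_lock with
 * a TLB gather, and mm->rss is reduced by the number of pages freed.
 */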
01072 void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
01073 {
01074 mmu_gather_t *tlb;
01075 pgd_t * dir;
01076 unsigned long start = address, end = address + size;
01077 int freed = 0;
01078 struct _va_range inadr;
01079
01080 inadr.va_range_ps_start_va=address;
01081 inadr.va_range_ps_end_va=address+size;
01082
01083 dir = pgd_offset(mm, address);
01084
01085
01086
01087
01088
01089
01090
01091
01092 if (address >= end)
01093 BUG();
01094 spin_lock(&mm->page_table_lock);
01095 exe_deltva(&inadr,0,0);
01096
01097 flush_cache_range(mm, address, end);
01098 tlb = tlb_gather_mmu(mm);
01099
01100 do {
01101 freed += zap_pud_range(tlb, dir, address, end - address);
01102 address = (address + PGDIR_SIZE) & PGDIR_MASK;
01103 dir++;
01104 } while (address && (address < end));
01105
01106
01107 tlb_finish_mmu(tlb, start, end);
01108
01109
01110
01111
01112
01113 if (mm->rss > freed)
01114 mm->rss -= freed;
01115 else
01116 mm->rss = 0;
01117 spin_unlock(&mm->page_table_lock);
01118 }
01119
01120
01121
01122
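/*
 * Walk the page tables for one address and return its page, or 0 if it
 * is not present.  For write access the pte must be both writable and
 * dirty.
 */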
01123 static struct page * follow_page(struct mm_struct *mm, unsigned long address, int write)
01124 {
01125 pgd_t *pgd;
01126 pud_t *pud;
01127 pmd_t *pmd;
01128 pte_t *ptep, pte;
01129
01130 pgd = pgd_offset(mm, address);
01131 if (pgd_none(*pgd) || pgd_bad(*pgd))
01132 goto out;
01133
01134 pud = pud_offset(pgd, address);
01135 if (pud_none(*pud) || pud_bad(*pud))
01136 goto out;
01137
01138 pmd = pmd_offset(pud, address);
01139 if (pmd_none(*pmd) || pmd_bad(*pmd))
01140 goto out;
01141
01142 ptep = pte_offset(pmd, address);
01143 if (!ptep)
01144 goto out;
01145
01146 pte = *ptep;
01147 if (pte_present(pte)) {
01148 if (!write ||
01149 (pte_write(pte) && pte_dirty(pte)))
01150 return pte_page(pte);
01151 }
01152
01153 out:
01154 return 0;
01155 }
01156
01157
01158
01159
01160
01161
01162
01163 static inline struct page * get_page_map(struct page *page)
01164 {
01165 if (!VALID_PAGE(page))
01166 return 0;
01167 return page;
01168 }
01169
01170
01171
01172
01173
01174 #define dprintk(x...)
01175
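/*
 * Pin down user pages for direct I/O into a kiobuf.  Note that the
 * get_user_pages() call is currently compiled out, so no pages are
 * actually looked up here.
 */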
01176 int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
01177 {
01178 int pgcount, err;
01179 struct mm_struct * mm;
01180
01181
01182 if (iobuf->nr_pages)
01183 return -EINVAL;
01184
01185 mm = current->mm;
01186 dprintk ("map_user_kiobuf: begin\n");
01187
01188 pgcount = (va + len + PAGE_SIZE - 1)/PAGE_SIZE - va/PAGE_SIZE;
01189
01190 if (!pgcount) BUG();
01191 err = expand_kiobuf(iobuf, pgcount);
01192 if (err)
01193 return err;
01194
01195 iobuf->locked = 0;
01196 iobuf->offset = va & (PAGE_SIZE-1);
01197 iobuf->length = len;
01198
01199
01200 down_read(&mm->mmap_sem);
01201
01202 #if 0
01203 err = get_user_pages(current, mm, va, pgcount,
01204 (rw==READ), 0, iobuf->maplist, NULL);
01205 #endif
01206 up_read(&mm->mmap_sem);
01207 if (err < 0) {
01208 unmap_kiobuf(iobuf);
01209 dprintk ("map_user_kiobuf: end %d\n", err);
01210 return err;
01211 }
01212 iobuf->nr_pages = err;
01213 while (pgcount--) {
01214
01215
01216
01217 flush_dcache_page(iobuf->maplist[pgcount]);
01218 }
01219 dprintk ("map_user_kiobuf: end OK\n");
01220 return 0;
01221 }
01222
01223
01224
01225
01226
01227
01228
01229
01230
01231
01232 void mark_dirty_kiobuf(struct kiobuf *iobuf, int bytes)
01233 {
01234 int index, offset, remaining;
01235 struct page *page;
01236
01237 index = iobuf->offset >> PAGE_SHIFT;
01238 offset = iobuf->offset & ~PAGE_MASK;
01239 remaining = bytes;
01240 if (remaining > iobuf->length)
01241 remaining = iobuf->length;
01242
01243 while (remaining > 0 && index < iobuf->nr_pages) {
01244 page = iobuf->maplist[index];
01245
01246 #if 0
01247 if (!PageReserved(page))
01248 SetPageDirty(page);
01249 #endif
01250
01251 remaining -= (PAGE_SIZE - offset);
01252 offset = 0;
01253 index++;
01254 }
01255 }
01256
01257
01258
01259
01260
01261
01262 void unmap_kiobuf (struct kiobuf *iobuf)
01263 {
01264 int i;
01265 struct page *map;
01266
01267 for (i = 0; i < iobuf->nr_pages; i++) {
01268 map = iobuf->maplist[i];
01269 if (map) {
01270 #if 0
01271 if (iobuf->locked)
01272 UnlockPage(map);
01273 #endif
01274
01275
01276
01277 page_cache_release(map);
01278 }
01279 }
01280
01281 iobuf->nr_pages = 0;
01282 iobuf->locked = 0;
01283 }
01284
01285
01286
01287
01288
01289
01290
01291
01292
01293
01294
01295
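/*
 * Lock down all pages of a set of kiobufs.  The per-page TryLockPage
 * logic is compiled out, so this currently just marks each kiobuf as
 * locked; the retry path handles contention when it is re-enabled.
 */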
01296 int lock_kiovec(int nr, struct kiobuf *iovec[], int wait)
01297 {
01298 struct kiobuf *iobuf;
01299 int i, j;
01300 struct page *page, **ppage;
01301 int doublepage = 0;
01302 int repeat = 0;
01303
01304 repeat:
01305
01306 for (i = 0; i < nr; i++) {
01307 iobuf = iovec[i];
01308
01309 if (iobuf->locked)
01310 continue;
01311
01312 ppage = iobuf->maplist;
01313 for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
01314 page = *ppage;
01315 if (!page)
01316 continue;
01317
01318 #if 0
01319 if (TryLockPage(page)) {
01320 while (j--) {
01321 struct page *tmp = *--ppage;
01322 if (tmp)
01323 UnlockPage(tmp);
01324 }
01325 goto retry;
01326 }
01327 #endif
01328 }
01329 iobuf->locked = 1;
01330 }
01331
01332 return 0;
01333
01334 retry:
01335
01336
01337
01338
01339
01340
01341 unlock_kiovec(nr, iovec);
01342 if (!wait)
01343 return -EAGAIN;
01344
01345
01346
01347
01348 #if 0
01349 if (!PageLocked(page)) {
01350
01351
01352
01353
01354
01355
01356
01357 if (++doublepage >= 3)
01358 return -EINVAL;
01359
01360
01361
01362 }
01363 #endif
01364
01365 if (++repeat < 16)
01366 goto repeat;
01367 return -EAGAIN;
01368 }
01369
01370
01371
01372
01373
01374 int unlock_kiovec(int nr, struct kiobuf *iovec[])
01375 {
01376 struct kiobuf *iobuf;
01377 int i, j;
01378 struct page *page, **ppage;
01379
01380 for (i = 0; i < nr; i++) {
01381 iobuf = iovec[i];
01382
01383 if (!iobuf->locked)
01384 continue;
01385 iobuf->locked = 0;
01386
01387 ppage = iobuf->maplist;
01388 for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
01389 page = *ppage;
01390 if (!page)
01391 continue;
01392 #if 0
01393 UnlockPage(page);
01394 #endif
01395 }
01396 }
01397 return 0;
01398 }
01399
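/*
 * zeromap helpers: install read-only references to the zero page over
 * a range (2.4-style cascade, driven by zeromap_page_range() below).
 * The mm/address arguments to ptep_get_and_clear() are placeholder
 * values (42).
 */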
01400 static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
01401 unsigned long size, pgprot_t prot)
01402 {
01403 unsigned long end;
01404
01405 address &= ~PMD_MASK;
01406 end = address + size;
01407 if (end > PMD_SIZE)
01408 end = PMD_SIZE;
01409 do {
01410 pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
01411 pte_t oldpage = ptep_get_and_clear(42, 42, pte);
01412 set_pte(pte, zero_pte);
01413 forget_pte(oldpage);
01414 address += PAGE_SIZE;
01415 pte++;
01416 } while (address && (address < end));
01417 }
01418
01419 static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
01420 unsigned long size, pgprot_t prot)
01421 {
01422 unsigned long end;
01423
01424 address &= ~PGDIR_MASK;
01425 end = address + size;
01426 if (end > PGDIR_SIZE)
01427 end = PGDIR_SIZE;
01428 do {
01429 pte_t * pte = pte_alloc(mm, pmd, address);
01430 if (!pte)
01431 return -ENOMEM;
01432 zeromap_pte_range(pte, address, end - address, prot);
01433 address = (address + PMD_SIZE) & PMD_MASK;
01434 pmd++;
01435 } while (address && (address < end));
01436 return 0;
01437 }
01438
01439 int zeromap_page_range(unsigned long address, unsigned long size, pgprot_t prot)
01440 {
01441 int error = 0;
01442 pgd_t * dir;
01443 unsigned long beg = address;
01444 unsigned long end = address + size;
01445 struct mm_struct *mm = current->mm;
01446
01447 dir = pgd_offset(mm, address);
01448 flush_cache_range(mm, beg, end);
01449 if (address >= end)
01450 BUG();
01451
01452 spin_lock(&mm->page_table_lock);
01453 do {
01454 pmd_t *pmd = pmd_alloc(mm, dir, address);
01455 error = -ENOMEM;
01456 if (!pmd)
01457 break;
01458 error = zeromap_pmd_range(mm, pmd, address, end - address, prot);
01459 if (error)
01460 break;
01461 address = (address + PGDIR_SIZE) & PGDIR_MASK;
01462 dir++;
01463 } while (address && (address < end));
01464 spin_unlock(&mm->page_table_lock);
01465 flush_tlb_range(mm, beg, end);
01466 return error;
01467 }
01468
01469
01470
01471
01472
01473
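/*
 * remap helpers: map a contiguous physical range into user space.
 * Only reserved or invalid pages are actually mapped (see
 * remap_pte_range).  Note that remap_pud_range() allocates pte tables
 * directly under the pud and steps by PUD_SIZE, which only lines up on
 * configurations where the pmd level is folded (as on i386 without
 * PAE); remap_pmd_range() itself is never called.
 */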
01474 static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
01475 unsigned long phys_addr, pgprot_t prot)
01476 {
01477 unsigned long end;
01478
01479 address &= ~PMD_MASK;
01480 end = address + size;
01481 if (end > PMD_SIZE)
01482 end = PMD_SIZE;
01483 do {
01484 struct page *page;
01485 pte_t oldpage;
01486 oldpage = ptep_get_and_clear(42, 42, pte);
01487
01488 page = virt_to_page(__va(phys_addr));
01489 if ((!VALID_PAGE(page)) || PageReserved(page))
01490 set_pte(pte, mk_pte_phys(phys_addr, prot));
01491 forget_pte(oldpage);
01492 address += PAGE_SIZE;
01493 phys_addr += PAGE_SIZE;
01494 pte++;
01495 } while (address && (address < end));
01496 }
01497
01498 static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
01499 unsigned long phys_addr, pgprot_t prot)
01500 {
01501 unsigned long end;
01502
01503 address &= ~PGDIR_MASK;
01504 end = address + size;
01505 if (end > PGDIR_SIZE)
01506 end = PGDIR_SIZE;
01507 phys_addr -= address;
01508 do {
01509 pte_t * pte = pte_alloc(mm, pmd, address);
01510 if (!pte)
01511 return -ENOMEM;
01512 remap_pte_range(pte, address, end - address, address + phys_addr, prot);
01513 address = (address + PMD_SIZE) & PMD_MASK;
01514 pmd++;
01515 } while (address && (address < end));
01516 return 0;
01517 }
01518
01519 static inline int remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
01520 unsigned long phys_addr, pgprot_t prot)
01521 {
01522 unsigned long end;
01523
01524 address &= ~PGDIR_MASK;
01525 end = address + size;
01526 if (end > PGDIR_SIZE)
01527 end = PGDIR_SIZE;
01528 phys_addr -= address;
01529 do {
01530 pte_t * pte = pte_alloc(mm, pud, address);
01531 if (!pte)
01532 return -ENOMEM;
01533 remap_pte_range(pte, address, end - address, address + phys_addr, prot);
01534 address = (address + PUD_SIZE) & PUD_MASK;
01535 pud++;
01536 } while (address && (address < end));
01537 return 0;
01538 }
01539
01540
01541 int remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
01542 {
01543 int error = 0;
01544 pgd_t * dir;
01545 unsigned long beg = from;
01546 unsigned long end = from + size;
01547 struct mm_struct *mm = current->mm;
01548
01549 phys_addr -= from;
01550 dir = pgd_offset(mm, from);
01551 flush_cache_range(mm, beg, end);
01552 if (from >= end)
01553 BUG();
01554
01555 spin_lock(&mm->page_table_lock);
01556 do {
01557 pud_t *pud = pud_alloc(mm, dir, from);
01558 error = -ENOMEM;
01559 if (!pud)
01560 break;
01561 error = remap_pud_range(mm, pud, from, end - from, phys_addr + from, prot);
01562 if (error)
01563 break;
01564 from = (from + PGDIR_SIZE) & PGDIR_MASK;
01565 dir++;
01566 } while (from && (from < end));
01567 spin_unlock(&mm->page_table_lock);
01568 flush_tlb_range(mm, beg, end);
01569 return error;
01570 }
01571
01572
01573
01574
01575
01576
01577
01578
01579
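/*
 * establish_pte() installs a new pte and keeps the TLB and MMU cache
 * coherent; break_cow() uses it to drop a private copy of a page into
 * place with write and dirty set, taking the protection from the
 * region's RDE.
 */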
01580 static inline void establish_pte(struct _rde * vma, unsigned long address, pte_t *page_table, pte_t entry)
01581 {
01582 set_pte(page_table, entry);
01583 flush_tlb_page2(current->mm, address);
01584 update_mmu_cache(vma, address, entry);
01585 }
01586
01587
01588
01589
01590 static inline void break_cow(struct _rde * vma, struct page * new_page, unsigned long address,
01591 pte_t *page_table)
01592 {
01593 flush_page_to_ram(new_page);
01594 flush_cache_page(vma, address);
01595 establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, (x_to_prot(vma->rde_r_regprot.regprt_l_region_prot))))));
01596 }
01597
01598
01599
01600
01601
01602
01603
01604
01605
01606
01607
01608
01609
01610
01611
01612
01613
01614
01615
01616
01617
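/*
 * do_wp_page -- handle a write fault on a present, write-protected pte
 * (copy-on-write).  If the page can be reused it is simply made
 * writable again; otherwise a new highmem page is allocated, the PFN
 * database fields are cloned, the data is copied, and break_cow()
 * installs the new pte.  Called with mm->page_table_lock held; returns
 * 1 on success and -1 on error.
 */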
01618 int do_wp_page(struct mm_struct *mm, struct _rde * vma,
01619 unsigned long address, pte_t *page_table, pte_t pte)
01620 {
01621 struct page *old_page, *new_page;
01622
01623 old_page = pte_page(pte);
01624 if (!VALID_PAGE(old_page))
01625 goto bad_wp_page;
01626
01627 if (1) {
01628 int reuse = can_share_swap_page(old_page);
01629 #if 0
01630 unlock_page(old_page);
01631 #endif
01632 if (reuse) {
01633 flush_cache_page(vma, address);
01634 establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
01635 spin_unlock(&mm->page_table_lock);
01636 return 1;
01637 }
01638 }
01639
01640
01641
01642
01643 page_cache_get(old_page);
01644 spin_unlock(&mm->page_table_lock);
01645
01646 new_page = alloc_page(GFP_HIGHUSER);
01647 if (!new_page)
01648 goto no_mem;
01649 new_page->pfn_l_page_state=old_page->pfn_l_page_state;
01650 new_page->pfn_l_wslx_qw=old_page->pfn_l_wslx_qw;
01651 new_page->pfn_q_pte_index=findpte_new(mm,address);
01652 new_page->pfn_q_bak=old_page->pfn_q_bak;
01653 new_page->pfn_l_refcnt=1;
01654 copy_cow_page(old_page,new_page,address);
01655
01656
01657
01658
01659 spin_lock(&mm->page_table_lock);
01660 if (pte_same(*page_table, pte)) {
01661 #if 0
01662 if (PageReserved(old_page))
01663 ++mm->rss;
01664 #endif
01665 break_cow(vma, new_page, address, page_table);
01666
01667
01668
01669 new_page = old_page;
01670 }
01671 spin_unlock(&mm->page_table_lock);
01672 page_cache_release(new_page);
01673 page_cache_release(old_page);
01674 return 1;
01675
01676 bad_wp_page:
01677 spin_unlock(&mm->page_table_lock);
01678 printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",address,(unsigned long)old_page);
01679 return -1;
01680 no_mem:
01681 page_cache_release(old_page);
01682 return -1;
01683 }
01684
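/*
 * vmtruncate_list() / vmtruncate(): truncation support.  The real body
 * of vmtruncate() is compiled out (it just returns -EPERM), and the
 * list walk below uses stubbed-out values (mm = 0, pgoff base 0, no
 * next pointer), so this code is effectively disabled.
 */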
01685 static void vmtruncate_list(struct _rde *mpnt, unsigned long pgoff)
01686 {
01687 do {
01688 struct mm_struct *mm = 0;
01689 unsigned long start = mpnt->rde_pq_start_va;
01690 unsigned long end = mpnt->rde_pq_start_va + mpnt->rde_q_region_size;
01691 unsigned long len = end - start;
01692 unsigned long diff;
01693
01694
01695 if (0 >= pgoff) {
01696 zap_page_range(mm, start, len);
01697 continue;
01698 }
01699
01700
01701 len = len >> PAGE_SHIFT;
01702 diff = pgoff - 0 ;
01703 if (diff >= len)
01704 continue;
01705
01706
01707 start += diff << PAGE_SHIFT;
01708 len = (len - diff) << PAGE_SHIFT;
01709 zap_page_range(mm, start, len);
01710 } while ((mpnt = 0 ) != NULL);
01711 }
01712
01713
01714
01715
01716
01717
01718
01719
01720
01721 int vmtruncate(struct _fcb * inode, loff_t offset)
01722 {
01723 #if 0
01724 unsigned long pgoff;
01725 struct address_space *mapping = inode->i_mapping;
01726 unsigned long limit;
01727
01728 if (inode->fcb_l_filesize < offset)
01729 goto do_expand;
01730 inode->fcb_l_filesize = offset;
01731 spin_lock(&mapping->i_shared_lock);
01732 if (!mapping->i_mmap && !mapping->i_mmap_shared)
01733 goto out_unlock;
01734
01735 pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
01736 if (mapping->i_mmap != NULL)
01737 vmtruncate_list(mapping->i_mmap, pgoff);
01738 if (mapping->i_mmap_shared != NULL)
01739 vmtruncate_list(mapping->i_mmap_shared, pgoff);
01740
01741 out_unlock:
01742 spin_unlock(&mapping->i_shared_lock);
01743 truncate_inode_pages(mapping, offset);
01744 goto out_truncate;
01745
01746 do_expand:
01747 limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
01748 if (limit != RLIM_INFINITY && offset > limit)
01749 goto out_sig;
01750 if (offset > inode->i_sb->s_maxbytes)
01751 goto out;
01752 inode->fcb_l_filesize = offset;
01753
01754 out_truncate:
01755 if (inode->i_op && inode->i_op->truncate) {
01756 lock_kernel();
01757 inode->i_op->truncate(inode);
01758 unlock_kernel();
01759 }
01760 return 0;
01761 out_sig:
01762 send_sig(SIGXFSZ, current, 0);
01763 out:
01764 return -EFBIG;
01765 #else
01766 return -EPERM;
01767 #endif
01768 }
01769
01770
01771
01772
01773
01774
01775
01776
01777
01778
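/*
 * Allocate a pud for the given pgd entry.  The page_table_lock is
 * dropped around the blocking allocation and the pgd is re-checked
 * afterwards in case another thread installed one first.  (There is no
 * fast-path allocator here, so 'new' always starts out NULL.)
 */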
01779 pud_t * fastcall __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
01780 {
01781 pud_t *new = 0;
01782
01783
01784 if (!new) {
01785 spin_unlock(&mm->page_table_lock);
01786 new = pud_alloc_one(mm, address);
01787 spin_lock(&mm->page_table_lock);
01788 if (!new)
01789 return NULL;
01790
01791
01792
01793
01794
01795 if (!pgd_none(*pgd)) {
01796 pud_free(new);
01797 goto out;
01798 }
01799 }
01800 pgd_populate(mm, pgd, new);
01801 out:
01802 return pud_offset(pgd, address);
01803 }
01804
01805
01806
01807
01808
01809
01810
01811
01812
01813
01814 pmd_t * fastcall __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
01815 {
01816 pmd_t *new;
01817
01818
01819 new = pmd_alloc_one_fast(mm, address);
01820 if (!new) {
01821 spin_unlock(&mm->page_table_lock);
01822 new = pmd_alloc_one(mm, address);
01823 spin_lock(&mm->page_table_lock);
01824 if (!new)
01825 return NULL;
01826
01827
01828
01829
01830
01831 if (!pud_none(*pud)) {
01832 pmd_free(new);
01833 goto out;
01834 }
01835 }
01836 pud_populate(mm, pud, new);
01837 out:
01838 return pmd_offset(pud, address);
01839 }
01840
01841
01842
01843
01844
01845
01846
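/*
 * Allocate a pte table for the given pmd entry, trying the
 * non-blocking allocator first and falling back to a blocking
 * allocation with the page_table_lock temporarily dropped.  Returns
 * the pte for 'address'.
 */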
01847 pte_t * fastcall pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
01848 {
01849 if (pmd_none(*pmd)) {
01850 pte_t *new;
01851
01852
01853 new = pte_alloc_one_fast(mm, address);
01854 if (!new) {
01855 spin_unlock(&mm->page_table_lock);
01856 new = pte_alloc_one(mm, address);
01857 spin_lock(&mm->page_table_lock);
01858 if (!new)
01859 return NULL;
01860
01861
01862
01863
01864
01865 if (!pmd_none(*pmd)) {
01866 pte_free(new);
01867 goto out;
01868 }
01869 }
01870 pmd_populate(mm, pmd, new);
01871 }
01872 out:
01873 return pte_offset(pmd, address);
01874 }
01875
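/*
 * Fault in every page of [addr, end) by looking up the region's RDE
 * and calling get_user_pages(); that call is currently compiled out,
 * so the function returns -1 for any non-empty range.
 */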
01876 int make_pages_present(unsigned long addr, unsigned long end)
01877 {
01878 int ret = 0, len, write;
01879 struct _rde * vma;
01880
01881
01882 vma = find_vma(current->pcb_l_phd,addr);
01883 write = (((struct _rde *) vma)->rde_l_flags & VM_WRITE) != 0;
01884 if (addr >= end)
01885 BUG();
01886 if (end > (unsigned long)vma->rde_pq_start_va + (unsigned long)vma->rde_q_region_size)
01887 BUG();
01888 len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
01889 #if 0
01890 ret = get_user_pages(current, current->mm, addr,
01891 len, write, 0, NULL, NULL);
01892 #endif
01893 return ret == len ? 0 : -1;
01894 }
01895