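/*
 * Memory-mapping support for a VMS-style personality: the generic
 * Linux mmap()/munmap()/brk() entry points are kept, but the address
 * space is described by process-header (PHD) and region-descriptor
 * (RDE) structures and is populated through the exe$create_region_32,
 * exe$crmpsc and exe$cretva executive routines.  mm_struct and
 * vm_area_struct pointers are routinely aliased to struct _phd and
 * struct _rde throughout this file.
 */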
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

#include <ipldef.h>
#include <phddef.h>
#include <rdedef.h>
#include <va_rangedef.h>
#include <dyndef.h>
#include <queue.h>
#include <misc_routines.h>
#include <exe_routines.h>
#include <rabdef.h>
#include <fabdef.h>

struct vm_area_struct * find_vma_intersection2(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr);

#undef DEBUG_MM_RB

pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

int sysctl_overcommit_memory;

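/*
 * vm_enough_memory() - rough overcommit check.  Returns non-zero when a
 * request for 'pages' additional pages looks satisfiable from free
 * pages plus free swap (plus pages currently in the swap cache), or
 * unconditionally when sysctl_overcommit_memory is set.
 */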
int vm_enough_memory(long pages)
{
	unsigned long free;

	/* Unconditional overcommit: always claim there is enough memory. */
	if (sysctl_overcommit_memory)
		return 1;

	free = 0;
	free += nr_free_pages();
	free += nr_swap_pages;

	/* Include pages sitting in the swap cache. */
	free += swapper_space.nrpages;

#if 0
	free += (dentry_stat.nr_unused * sizeof(struct dentry)) >> PAGE_SHIFT;
	free += (inodes_stat.nr_unused * sizeof(struct inode)) >> PAGE_SHIFT;
#endif

	return free > pages;
}

void lock_vma_mappings(struct vm_area_struct *vma)
{
	struct address_space *mapping;

	mapping = NULL;
#if 0
	if (vma->vm_file)
		mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
#endif
	/* With the lookup above disabled, mapping stays NULL and no lock is taken. */
	if (mapping)
		spin_lock(&mapping->i_shared_lock);
}

void unlock_vma_mappings(struct vm_area_struct *vma)
{
	struct address_space *mapping;

	mapping = NULL;
#if 0
	if (vma->vm_file)
		mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
#endif
	if (mapping)
		spin_unlock(&mapping->i_shared_lock);
}

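/*
 * sys_brk() - adjust the end of the process data segment.  Shrinking
 * requests are handed to do_munmap(); growing requests are checked
 * against RLIMIT_DATA, existing mappings and available memory before
 * do_brk() extends the heap.  The (possibly unchanged) break value is
 * returned.
 */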
asmlinkage unsigned long sys_brk(unsigned long brk)
{
	unsigned long rlim, retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);

	if (brk < mm->end_code)
		goto out;
	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against the data segment rlimit. */
	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
		goto out;

	/* Check against existing mappings in the new range. */
	if (find_vma_intersection2(current->pcb$l_phd, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Check if we have enough memory. */
	if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}

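/*
 * calc_rde$l_flags() - translate mmap() PROT_ and MAP_ bits into the
 * VM_* flag bits stored in an RDE.  The _trans helper either masks the
 * bit directly (when source and destination coincide) or maps it to
 * the corresponding destination bit.
 */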
static inline unsigned long calc_rde$l_flags(unsigned long prot, unsigned long flags)
{
#define _trans(x,bit1,bit2) \
((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)

	unsigned long prot_bits, flag_bits;
	prot_bits =
		_trans(prot, PROT_READ, VM_READ) |
		_trans(prot, PROT_WRITE, VM_WRITE) |
		_trans(prot, PROT_EXEC, VM_EXEC);
	flag_bits =
		_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
		_trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
		_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
	return (prot_bits | flag_bits);
#undef _trans
}

#ifdef DEBUG_MM_RB
static int browse_rb(rb_node_t * rb_node) {
	int i = 0;
	if (rb_node) {
		i++;
		i += browse_rb(rb_node->rb_left);
		i += browse_rb(rb_node->rb_right);
	}
	return i;
}

static void validate_mm(struct mm_struct * mm) {
	int bug = 0;
	int i = 0;
	struct _rde * tmp = mm->mmap;
	while (tmp) {
		tmp = tmp->vm_next;
		i++;
	}
	if (i != mm->map_count)
		printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
	i = browse_rb(mm->mm_rb.rb_node);
	if (i != mm->map_count)
		printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
	if (bug)
		BUG();
}
#else
#define validate_mm(mm) do { } while (0)
#endif

#if 0
static int vma_merge(struct mm_struct * mm, struct vm_area_struct * prev,
	rb_node_t * rb_parent, unsigned long addr, unsigned long end, unsigned long vm_flags)
{
	spinlock_t * lock = &mm->page_table_lock;
	if (prev->vm_end == addr && can_vma_merge(prev, vm_flags)) {
		struct vm_area_struct * next;

		spin_lock(lock);
		prev->vm_end = end;
		next = prev->vm_next;
		if (next && prev->vm_end == next->vm_start && can_vma_merge(next, vm_flags)) {
			prev->vm_end = next->vm_end;
			__vma_unlink(mm, next, prev);
			spin_unlock(lock);

			mm->map_count--;
			kmem_cache_free(vm_area_cachep, next);
			return 1;
		}
		spin_unlock(lock);
		return 1;
	}

	prev = prev->vm_next;
	if (prev) {
 merge_next:
		if (!can_vma_merge(prev, vm_flags))
			return 0;
		if (end == prev->vm_start) {
			spin_lock(lock);
			prev->vm_start = addr;
			spin_unlock(lock);
			return 1;
		}
	}

	return 0;
}
#endif

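/*
 * do_mmap_pgoff() - main mapping primitive.  After validating the
 * length, the map count and the rlimits, it picks an address with
 * get_unmapped_area(), converts the PROT_ and MAP_ bits with
 * calc_rde$l_flags(), unmaps any overlapping regions, and creates the
 * new range through exe$create_region_32 followed by exe$crmpsc for
 * file-backed mappings (the channel is taken from fab$l_stv) or
 * exe$cretva for anonymous ones.  The generic Linux vm_area_struct
 * path after the early return is never reached.
 */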
unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long pgoff)
{
	struct mm_struct * mm = current->mm;
	struct _rde * vma, * prev;
	unsigned int rde$l_flags;
	int correct_wcount = 0;
	int error;
	rb_node_t ** rb_link, * rb_parent;
	struct _va_range inadr;

#if 0
	if (file && ((struct _fcb *)file)->fcb$b_type!=DYN$C_FCB)
		if (file && (!file->f_op || !file->f_op->mmap))
			return -ENODEV;
#endif

	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (len > TASK_SIZE)
		return -EINVAL;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EINVAL;

	/* Too many mappings? */
	if (mm->map_count > MAX_MAP_COUNT)
		return -ENOMEM;

	/* Obtain the address to map to.  We verify (or select) it and
	 * ensure that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (addr & ~PAGE_MASK)
		return addr;

#if 0
munmap_back2:
	vma = find_vma_prev(current->pcb$l_phd, addr, &prev);
	if (vma && vma->rde$pq_start_va < addr + len) {
		if (do_munmap(mm, addr, len))
			return -ENOMEM;
		goto munmap_back2;
	}
#endif

	/* Combine the mmap prot and flags bits with the mm defaults. */
	rde$l_flags = calc_rde$l_flags(prot,flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/* mlock MCL_FUTURE? */
	if (rde$l_flags & VM_LOCKED) {
		unsigned long locked = mm->locked_vm << PAGE_SHIFT;
		locked += len;
		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
			return -EAGAIN;
	}

	if (file) {
		if (0) {	/* file access checks are currently disabled */
			switch (flags & MAP_TYPE) {
			case MAP_SHARED:
				if ((prot & PROT_WRITE) && !(file->f_mode & FMODE_WRITE))
					return -EACCES;

				/* Make sure we don't allow writing to an append-only file. */
				if (IS_APPEND(file->f_dentry->d_inode) && (file->f_mode & FMODE_WRITE))
					return -EACCES;

				/* Make sure there are no mandatory locks on the file. */
				if (locks_verify_locked(file->f_dentry->d_inode))
					return -EAGAIN;

				rde$l_flags |= VM_SHARED | VM_MAYSHARE;
				if (!(file->f_mode & FMODE_WRITE))
					rde$l_flags &= ~(VM_MAYWRITE | VM_SHARED);

				/* fall through */
			case MAP_PRIVATE:
				if (!(file->f_mode & FMODE_READ))
					return -EACCES;
				break;

			default:
				return -EINVAL;
			}
		}
	} else {
		rde$l_flags |= VM_SHARED | VM_MAYSHARE;
		switch (flags & MAP_TYPE) {
		default:
			return -EINVAL;
		case MAP_PRIVATE:
			rde$l_flags &= ~(VM_SHARED | VM_MAYSHARE);
			/* fall through */
		case MAP_SHARED:
			break;
		}
	}

	/* Clear old maps. */
	error = -ENOMEM;
munmap_back:
	vma = find_vma(current->pcb$l_phd, addr);
	if (vma && vma->rde$pq_start_va < addr + len) {
		if (do_munmap(mm, addr, len))
			return -ENOMEM;
		goto munmap_back;
	}

	/* Check against address space limit. */
	if ((mm->total_vm << PAGE_SHIFT) + len
	    > current->rlim[RLIMIT_AS].rlim_cur)
		return -ENOMEM;

	/* Private writable mapping?  Check memory availability. */
	if ((rde$l_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
	    !(flags & MAP_NORESERVE) &&
	    !vm_enough_memory(len >> PAGE_SHIFT))
		return -ENOMEM;

#if 0
	/* Can we just expand an old anonymous mapping? */
	if (!file && !(rde$l_flags & VM_SHARED) && vma)
		if (vma_merge(mm, prev, rb_parent, addr, addr + len, rde$l_flags))
			goto out;
#endif

	inadr.va_range$ps_start_va = addr;
	inadr.va_range$ps_end_va = addr + len;

	exe$create_region_32(len, *(unsigned long*)&protection_map[(rde$l_flags>>8) & 0x0f], rde$l_flags, 0, 0, 0, addr);
	if (file) {
		struct _fcb * fcb = file;
#if 0
		if (((struct _fcb *)file)->fcb$b_type!=DYN$C_FCB)
			fcb=e2_search_fcb(file->f_dentry->d_inode);
#endif
		struct _rabdef * rab = file;
		struct _fabdef * fab = rab->rab$l_fab;
		int chan = fab->fab$l_stv;
		exe$crmpsc(&inadr,0,0,0,0,0,0,chan,0,pgoff<<3,0,0);
	} else {
		exe$cretva(&inadr,0,0);
	}
	return addr;
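	/*
	 * NOTREACHED: the generic Linux vm_area_struct construction below
	 * is retained for reference only; the function always returns above.
	 */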

	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->rde$pq_start_va = addr;
	vma->rde$q_region_size = len;
	vma->rde$l_flags = rde$l_flags;
	vma->rde$r_regprot.regprt$l_region_prot = *(unsigned long*)&protection_map[(rde$l_flags>>8) & 0x0f];
#if 0
	vma->vm_ops = NULL;
	vma->vm_pgoff = pgoff;
	vma->vm_file = NULL;
	vma->vm_private_data = NULL;
	vma->vm_raend = 0;
#endif

	if (file) {
		error = -EINVAL;
		if (rde$l_flags & (VM_GROWSDOWN|VM_GROWSUP))
			goto free_vma;
		if (rde$l_flags & VM_DENYWRITE) {
			error = deny_write_access(file);
			if (error)
				goto free_vma;
			correct_wcount = 1;
		}

		get_file(file);
#if 0
		error = file->f_op->mmap(file, vma);
#endif
		if (error)
			goto unmap_and_free_vma;
	} else if (flags & MAP_SHARED) {
		error = shmem_zero_setup(vma);
		if (error)
			goto free_vma;
	}

	addr = vma->rde$pq_start_va;

#if 0
	if (((struct _fcb *)file)->fcb$b_type!=DYN$C_FCB)
		if (correct_wcount)
			atomic_inc(&file->f_dentry->d_inode->i_writecount);
#endif

out:
	mm->total_vm += len >> PAGE_SHIFT;
	if (rde$l_flags & VM_LOCKED) {
		mm->locked_vm += len >> PAGE_SHIFT;
		make_pages_present(addr, addr + len);
	}
	return addr;

unmap_and_free_vma:
#if 0
	if (((struct _fcb *)file)->fcb$b_type!=DYN$C_FCB)
		if (correct_wcount)
			atomic_inc(&file->f_dentry->d_inode->i_writecount);

	if (((struct _fcb *)file)->fcb$b_type!=DYN$C_FCB)
		fput(file);
#endif

	zap_page_range(mm, vma->rde$pq_start_va, vma->rde$q_region_size);
free_vma:
	kmem_cache_free(vm_area_cachep, vma);
	return error;
}

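/*
 * get_unmapped_area() - pick a start address for a new mapping.  For
 * MAP_FIXED the caller's page-aligned address is used as-is; otherwise
 * arch_get_unmapped_area() first tries the hint address and then walks
 * the RDE list from TASK_UNMAPPED_BASE looking for the first hole of
 * at least len bytes.
 */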
#ifndef HAVE_ARCH_UNMAPPED_AREA
static inline unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct _rde *vma;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(current->pcb$l_phd,addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->rde$pq_start_va))
			return addr;
	}
	addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);

	for (vma = find_vma(current->pcb$l_phd,addr); ; vma = vma->rde$ps_va_list_flink) {
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		if (!vma || addr + len <= vma->rde$pq_start_va)
			return addr;
		addr = (vma->rde$pq_start_va + vma->rde$q_region_size);
	}
}
#else
extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
#endif

unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	if (flags & MAP_FIXED) {
		if (addr > TASK_SIZE - len)
			return -ENOMEM;
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		return addr;
	}

#if 0
	if (file && ((struct _fcb *)file)->fcb$b_type!=DYN$C_FCB)
		if (file && file->f_op && file->f_op->get_unmapped_area)
			return file->f_op->get_unmapped_area(file, addr, len, pgoff, flags);
#endif

	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}

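/*
 * find_vma()/find_vma_prev() - walk the circular P0 RDE list hanging
 * off the process header and return the first region whose end lies
 * above addr (the _prev variant also reports the preceding region).
 * find_extend_vma() additionally grows a VM_GROWSDOWN region down to
 * addr, and find_vma_intersection2() returns a region only if it
 * actually overlaps [start_addr, end_addr).
 */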
struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr) {
	struct _phd * phd=mm;
	struct _rde * head=&phd->phd$ps_p0_va_list_flink;
	struct _rde * tmp=head->rde$ps_va_list_flink;
	while (tmp!=head) {
		if (addr<(tmp->rde$ps_start_va+(unsigned long)tmp->rde$q_region_size))
			return tmp;
		tmp=tmp->rde$ps_va_list_flink;
	}
	return 0;
}

struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, struct vm_area_struct **prev) {
	struct _phd * phd=mm;
	struct _rde * head=&phd->phd$ps_p0_va_list_flink;
	struct _rde * tmp=head->rde$ps_va_list_flink;
	*prev=0;
	while (tmp!=head) {
		if (addr<(tmp->rde$ps_start_va+(unsigned long)tmp->rde$q_region_size))
			return tmp;
		*prev=tmp;
		tmp=tmp->rde$ps_va_list_flink;
	}
	return 0;
}

struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr)
{
	struct _rde * vma;
	unsigned long start;

	addr &= PAGE_MASK;
	vma = find_vma(current->pcb$l_phd,addr);
	if (!vma)
		return NULL;
	if (vma->rde$ps_start_va <= addr)
		return vma;
	if (!(vma->rde$l_flags & VM_GROWSDOWN))
		return NULL;
	start = vma->rde$ps_start_va;
	if (expand_stack(vma, addr))
		return NULL;
	if (vma->rde$l_flags & VM_LOCKED) {
		make_pages_present(addr, start);
	}
	return vma;
}

struct vm_area_struct * find_vma_intersection2(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct _rde * vma = find_vma(mm,start_addr);

	if (vma && end_addr <= vma->rde$ps_start_va)
		vma = NULL;
	return vma;
}

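/*
 * unmap_fixup() - fix up the RDE list after part of a region has been
 * unmapped.  Depending on whether [addr, addr+len) covers the whole
 * region, one of its ends, or a hole in the middle, the region is
 * dropped, trimmed, or split using the preallocated 'extra' RDE; the
 * unused spare (if any) is returned to the caller.
 */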
static struct _rde * unmap_fixup(struct mm_struct *mm,
	struct _rde *area, unsigned long addr, size_t len,
	struct _rde *extra)
{
	struct _rde *mpnt;
	unsigned long end = addr + len;

#if 0
	area->vm_mm->total_vm -= len >> PAGE_SHIFT;
	if (area->rde$l_flags & VM_LOCKED)
		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
#endif

	/* Unmapping the whole area. */
	if (addr == area->rde$pq_start_va && end == (area->rde$pq_start_va + area->rde$q_region_size)) {
#if 0
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_file)
			fput(area->vm_file);
#endif
		return extra;
	}

	/* Work out to one of the ends. */
	if (end == (area->rde$pq_start_va + area->rde$q_region_size)) {
		/* Trim the tail of the region. */
		area->rde$q_region_size = addr - (unsigned long) area->rde$pq_start_va;
		lock_vma_mappings(area);
		spin_lock(&mm->page_table_lock);
	} else if (addr == area->rde$pq_start_va) {
		/* Trim the head of the region. */
		area->rde$pq_start_va = end;
		area->rde$q_region_size -= len;
		lock_vma_mappings(area);
		spin_lock(&mm->page_table_lock);
	} else {
		/* Unmapping a hole in the middle: split into two regions,
		 * handing the piece above the hole to the 'extra' RDE. */
		mpnt = extra;
		extra = NULL;

		mpnt->rde$pq_start_va = end;
		/* New size runs from 'end' to the old end of the region. */
		mpnt->rde$q_region_size = (unsigned long) area->rde$pq_start_va + area->rde$q_region_size - end;
		mpnt->rde$r_regprot.regprt$l_region_prot = area->rde$r_regprot.regprt$l_region_prot;
		mpnt->rde$l_flags = area->rde$l_flags;
#if 0
		mpnt->vm_private_data = area->vm_private_data;
		if (mpnt->vm_file)
			get_file(mpnt->vm_file);
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
#endif
		area->rde$q_region_size = addr - (unsigned long) area->rde$pq_start_va;

		lock_vma_mappings(area);
		spin_lock(&mm->page_table_lock);
		insrde(mpnt,&current->pcb$l_phd->phd$ps_p0_va_list_flink);
	}

	/* Reinsert the (trimmed) original region. */
	insrde(area,&current->pcb$l_phd->phd$ps_p0_va_list_flink);
	spin_unlock(&mm->page_table_lock);
	unlock_vma_mappings(area);
	return extra;
}

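/*
 * free_pgtables() - after an unmap, release the page-table pages
 * covering the gap [start, end), clipped so that ranges still in use
 * by the neighbouring regions are preserved, then flush the TLB for
 * the affected page-table range.
 */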
static void free_pgtables(struct mm_struct * mm, struct _rde *prev,
	unsigned long start, unsigned long end)
{
	unsigned long first = start & PGDIR_MASK;
	unsigned long last = end + PGDIR_SIZE - 1;
	unsigned long start_index, end_index;

	if (!prev) {
		prev = mm->mmap;
		if (!prev)
			goto no_mmaps;
		if ((prev->rde$pq_start_va + prev->rde$q_region_size) > start) {
			if (last > prev->rde$pq_start_va)
				last = prev->rde$pq_start_va;
			goto no_mmaps;
		}
	}
	for (;;) {
		struct _rde *next = 0;
		next = prev->rde$ps_va_list_flink;

		if (next && next->rde$b_type==30) {
			if (next->rde$pq_start_va < start) {
				prev = next;
				continue;
			}
			if (last > next->rde$pq_start_va)
				last = next->rde$pq_start_va;
		}
		if ((prev->rde$pq_start_va + prev->rde$q_region_size) > first)
			first = (prev->rde$pq_start_va + prev->rde$q_region_size) + PGDIR_SIZE - 1;
		break;
	}
no_mmaps:
	start_index = pgd_index(first);
	end_index = pgd_index(last);
	if (end_index > start_index) {
		clear_page_tables(mm, start_index, end_index - start_index);
		flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
	}
}

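/*
 * do_munmap() - remove any mappings overlapping [addr, addr+len).  The
 * affected RDEs are unlinked from the P0 list under page_table_lock,
 * their pages are zapped, and unmap_fixup() rebuilds whatever pieces
 * of each region survive outside the unmapped range; finally the now
 * unused page tables are freed.
 */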
int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
{
	struct _rde *mpnt, *prev, **npp, *free, *extra;

	if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
		return -EINVAL;

	if ((len = PAGE_ALIGN(len)) == 0)
		return -EINVAL;

	/* Check if this memory area is ok - put it on the temporary
	 * list if so.  Every area affected in some way (by any overlap)
	 * is put on the list.  If nothing is put on, nothing is affected.
	 */
	mpnt = find_vma_prev(current->pcb$l_phd,addr,&prev);
	if (!mpnt)
		return 0;

	/* No overlap at all? */
	if (mpnt->rde$pq_start_va >= addr+len)
		return 0;

	/* Unmapping a hole in the middle of a region needs one extra RDE. */
	if ((mpnt->rde$pq_start_va < addr && (mpnt->rde$pq_start_va + mpnt->rde$q_region_size) > addr+len)
	    && mm->map_count >= MAX_MAP_COUNT)
		return -ENOMEM;

	/* We may need one additional RDE to fix up the mappings -
	 * this is the last chance for an easy error exit.
	 */
	extra = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!extra)
		return -ENOMEM;

	npp = (prev ? &prev->rde$ps_va_list_flink : &current->pcb$l_phd->phd$ps_p0_va_list_flink);
	free = NULL;
	spin_lock(&mm->page_table_lock);
	for ( ; mpnt && mpnt->rde$pq_start_va < addr+len; mpnt = *npp) {
		/* Unlink the RDE and push it on the local free list. */
		remque(mpnt,0);
		mpnt->rde$ps_va_list_flink = free;
		free = mpnt;
	}
	mm->mmap_cache = NULL;	/* Kill the cache. */
	spin_unlock(&mm->page_table_lock);

	/* Ok - we have the regions we should free on the 'free' list, so
	 * release them and unmap the page range.  If a region is only
	 * partially unmapped, unmap_fixup() puts new RDE(s) back on the list.
	 */
	while ((mpnt = free) != NULL) {
		unsigned long st, end, size;
		struct file *file = NULL;

		free = free->rde$ps_va_list_flink;

		st = addr < mpnt->rde$pq_start_va ? mpnt->rde$pq_start_va : addr;
		end = addr+len;
		end = end > (mpnt->rde$pq_start_va + mpnt->rde$q_region_size) ? (mpnt->rde$pq_start_va + mpnt->rde$q_region_size) : end;
		size = end - st;

		if (mpnt->rde$l_flags & VM_DENYWRITE &&
		    (st != mpnt->rde$pq_start_va || end != (mpnt->rde$pq_start_va + mpnt->rde$q_region_size)) && 1) {
#if 0
		    (file = mpnt->vm_file) != NULL) {
			atomic_dec(&file->f_dentry->d_inode->i_writecount);
#endif
		}

		mm->map_count--;

		zap_page_range(mm, st, size);

		/* Fix the RDE list up, reusing 'extra' if a split is needed. */
		extra = unmap_fixup(mm, mpnt, st, size, extra);
#if 0
		if (file)
			atomic_inc(&file->f_dentry->d_inode->i_writecount);
#endif
	}

	/* Release the extra RDE if it wasn't used. */
#if 0 // not yet
	if (extra)
		kmem_cache_free(vm_area_cachep, extra);
#endif

	free_pgtables(mm, prev, addr, addr+len);

	return 0;
}

asmlinkage long sys_munmap(unsigned long addr, size_t len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}

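/*
 * do_brk() - internal helper for brk() (and other callers that want a
 * quick anonymous mapping): unmaps any overlap, checks the rlimits and
 * available memory, then inserts a writable anonymous RDE for
 * [addr, addr+len) without performing the full do_mmap() dance.
 */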
unsigned long do_brk(unsigned long addr, unsigned long len)
{
	struct mm_struct * mm = current->mm;
	struct _rde * vma, * prev;
	unsigned long flags;
	rb_node_t ** rb_link, * rb_parent;

	len = PAGE_ALIGN(len);
	if (!len)
		return addr;

	/* mlock MCL_FUTURE? */
	if (mm->def_flags & VM_LOCKED) {
		unsigned long locked = mm->locked_vm << PAGE_SHIFT;
		locked += len;
		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
			return -EAGAIN;
	}

	/* Clear old maps.  This also does some error checking for us. */
munmap_back:
	vma = find_vma(current->pcb$l_phd, addr);
	if (vma && vma->rde$pq_start_va < addr + len) {
		if (do_munmap(mm, addr, len))
			return -ENOMEM;
		goto munmap_back;
	}

	/* Check against address space limits *after* clearing old maps. */
	if ((mm->total_vm << PAGE_SHIFT) + len
	    > current->rlim[RLIMIT_AS].rlim_cur)
		return -ENOMEM;

	if (mm->map_count > MAX_MAP_COUNT)
		return -ENOMEM;

	if (!vm_enough_memory(len >> PAGE_SHIFT))
		return -ENOMEM;

	flags = calc_rde$l_flags(PROT_READ|PROT_WRITE|PROT_EXEC,
				MAP_FIXED|MAP_PRIVATE) | mm->def_flags;

	flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/* Can we just expand an old anonymous mapping? */
#if 0
	if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len, flags))
		goto out;
#endif

	/* Create an RDE for the anonymous mapping. */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->rde$pq_start_va = addr;
	vma->rde$q_region_size = len;
	vma->rde$l_flags = flags;
	vma->rde$r_regprot.regprt$l_region_prot = *(unsigned long*)&protection_map[(flags>>8) & 0x0f];
#if 0
	vma->vm_ops = NULL;
	vma->vm_pgoff = 0;
	vma->vm_file = NULL;
	vma->vm_private_data = NULL;
#endif

	insrde(vma,&current->pcb$l_phd->phd$ps_p0_va_list_flink);

out:
	mm->total_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED) {
		mm->locked_vm += len >> PAGE_SHIFT;
		make_pages_present(addr, addr + len);
	}
	return addr;
}

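/*
 * exit_mmap() - tear down the whole address space on process exit:
 * walk the P0 RDE list, zap the pages of every region, and finally
 * drop the user page tables (only wired up for i386 here).
 */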
void exit_mmap(struct mm_struct * mm)
{
	struct _rde * mpnt;

	release_segments(mm);
	spin_lock(&mm->page_table_lock);
	mpnt = current->pcb$l_phd->phd$ps_p0_va_list_flink;

	mm->mmap = mm->mmap_cache = NULL;
	mm->mm_rb = RB_ROOT;
	mm->rss = 0;
	spin_unlock(&mm->page_table_lock);
	mm->total_vm = 0;
	mm->locked_vm = 0;

	flush_cache_mm(mm);

	/* Walk the circular P0 list until we are back at the listhead. */
	while (mpnt != &current->pcb$l_phd->phd$ps_p0_va_list_flink) {
		struct _rde * next = mpnt->rde$ps_va_list_flink;
		unsigned long start = mpnt->rde$pq_start_va;
		unsigned long size = mpnt->rde$q_region_size;

#if 0
		if (mpnt->vm_ops) {
			if (mpnt->vm_ops->close)
				mpnt->vm_ops->close(mpnt);
		}
#endif
		mm->map_count--;

		zap_page_range(mm, start, size);
#if 0
		if (mpnt->vm_file)
			fput(mpnt->vm_file);
#endif
		mpnt = next;
	}
	flush_tlb_mm(mm);

#if 0
	if (mm->map_count)
		BUG();
#endif

#ifdef __i386__
	clear_page_tables(mm, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD - 1);
#else
	/* Page-table teardown for other architectures is not wired up yet. */
#if 0
	void free_pgd_range(struct mmu_gather **tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling);
#include <asm/tlb.h>
	struct mmu_gather *tlb = tlb_gather_mmu(mm);
	free_pgd_range(&tlb, FIRST_USER_ADDRESS, 0x7f000000, FIRST_USER_ADDRESS, 0);
#endif
#endif
}

#if 0
void __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
{
	struct _rde * __vma, * prev;
	rb_node_t ** rb_link, * rb_parent;

	__vma = find_vma_prepare(mm, vma->rde$pq_start_va, &prev, &rb_link, &rb_parent);
	if (__vma && __vma->rde$pq_start_va < (vma->rde$pq_start_va + vma->rde$q_region_size))
		BUG();

	mm->map_count++;
	validate_mm(mm);
}

void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
{
	struct _rde * __vma, * prev;
	rb_node_t ** rb_link, * rb_parent;

	__vma = find_vma_prepare(mm, vma->rde$pq_start_va, &prev, &rb_link, &rb_parent);
	if (__vma && __vma->rde$pq_start_va < (vma->rde$pq_start_va + vma->rde$q_region_size))
		BUG();

	validate_mm(mm);
}
#endif