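/*
 * mremap.c -- resize and/or move an existing virtual memory mapping.
 *
 * The structure of this file follows linux/mm/mremap.c, adapted to the
 * VMS-style per-process data structures used here: regions are described
 * by RDEs (struct _rde) linked from the process header (PHD) rather than
 * by vm_area_structs on the mm.
 */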
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include <ipldef.h>
#include <phddef.h>
#include <rdedef.h>
#include <misc_routines.h>

extern int vm_enough_memory(long pages);

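/*
 * Walk the page tables for addr and return a pointer to its pte if the
 * whole pgd/pud/pmd chain is present, or NULL if any level is missing.
 * Bad entries are reported and cleared on the way down.
 */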
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte = NULL;

    pgd = pgd_offset(mm, addr);
    if (pgd_none(*pgd))
        goto end;
    if (pgd_bad(*pgd)) {
        pgd_ERROR(*pgd);
        pgd_clear(pgd);
        goto end;
    }

    pud = pud_offset(pgd, addr);
    if (pud_none(*pud))
        goto end;
    if (pud_bad(*pud)) {
        pud_ERROR(*pud);
        pud_clear(pud);
        goto end;
    }

    pmd = pmd_offset(pud, addr);
    if (pmd_none(*pmd))
        goto end;
    if (pmd_bad(*pmd)) {
        pmd_ERROR(*pmd);
        pmd_clear(pmd);
        goto end;
    }

    pte = pte_offset(pmd, addr);
    if (pte_none(*pte))
        pte = NULL;
end:
    return pte;
}

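/*
 * Make sure the page-table levels needed to hold a pte for addr exist,
 * allocating them if necessary, and return a pointer to the pte slot
 * (or NULL on allocation failure).
 */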
static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
{
    pmd_t *pmd;
    pte_t *pte = NULL;

    pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
    if (pmd)
        pte = pte_alloc(mm, pmd, addr);
    return pte;
}

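/*
 * Move a single pte from *src to *dst: the source entry is grabbed and
 * cleared, then written at the destination.  Returns non-zero if there
 * was no destination pte to write to.
 */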
static inline int copy_one_pte(struct mm_struct *mm, pte_t *src, pte_t *dst)
{
    int error = 0;
    pte_t pte;

    if (!pte_none(*src)) {
        pte = ptep_get_and_clear(42, 42, src);
        if (!dst) {
            /* No destination pte: put the entry back and flag the failure */
            dst = src;
            error++;
        }
        set_pte(dst, pte);
    }
    return error;
}

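/*
 * Move the pte for one page from old_addr to new_addr under the
 * page_table_lock.  Returns non-zero if the destination pte could not
 * be allocated.
 */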
static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
    int error = 0;
    pte_t *src;

    spin_lock(&mm->page_table_lock);
    src = get_one_pte(mm, old_addr);
    if (src)
        error = copy_one_pte(mm, src, alloc_one_pte(mm, new_addr));
    spin_unlock(&mm->page_table_lock);
    return error;
}

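/*
 * Move len bytes worth of ptes from old_addr to new_addr, one page at a
 * time.  On failure the pages already moved are moved back and the
 * destination range is zapped.
 */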
static int move_page_tables(struct mm_struct *mm,
    unsigned long new_addr, unsigned long old_addr, unsigned long len)
{
    unsigned long offset = len;

    flush_cache_range(mm, old_addr, old_addr + len);

    /*
     * This is not the clever way to do it, but it keeps error recovery
     * simple on the assumption that most remappings are only a few
     * pages: just move one page at a time, starting from the end.
     */
    while (offset) {
        offset -= PAGE_SIZE;
        if (move_one_page(mm, old_addr + offset, new_addr + offset))
            goto oops_we_failed;
    }
    flush_tlb_range(mm, old_addr, old_addr + len);
    return 0;

    /*
     * The move failed because we could not allocate page tables for the
     * new range.  That is unlikely, but has to be handled: move all the
     * pages already transferred back again (this works because the old
     * page tables still exist) and zap the destination range.
     */
oops_we_failed:
    flush_cache_range(mm, new_addr, new_addr + len);
    while ((offset += PAGE_SIZE) < len)
        move_one_page(mm, new_addr + offset, old_addr + offset);
    zap_page_range(mm, new_addr, len);
    return -1;
}

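/*
 * Set up the RDE for the moved mapping at new_addr: reuse an adjacent
 * region when the new range can simply be merged into it, otherwise
 * allocate a fresh RDE.  Then move the page tables across and unmap the
 * old range.  Returns the new address, or -ENOMEM on failure.
 */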
static inline unsigned long move_vma(struct _rde *vma,
    unsigned long addr, unsigned long old_len, unsigned long new_len,
    unsigned long new_addr)
{
    struct mm_struct *mm = current->mm;
    struct _rde *new_vma, *next, *prev;
    int allocated_vma;

    new_vma = NULL;

    next = find_vma_prev(current->pcb$l_phd, new_addr, &prev);
    if (next) {
        if (prev && (prev->rde$pq_start_va + prev->rde$q_region_size) == new_addr &&
            can_vma_merge(prev, vma->rde$l_flags) && !(vma->rde$l_flags & VM_SHARED)) {
            /* The region ending at new_addr can absorb the new range */
            spin_lock(&mm->page_table_lock);
            prev->rde$q_region_size = new_len;
            spin_unlock(&mm->page_table_lock);
            new_vma = prev;
            if (next != prev->rde$ps_va_list_flink)
                BUG();
            if ((prev->rde$pq_start_va + prev->rde$q_region_size) == next->rde$pq_start_va &&
                can_vma_merge(next, prev->rde$l_flags)) {
                /* The expanded region now abuts the next one: merge them */
                spin_lock(&mm->page_table_lock);
                prev->rde$q_region_size = next->rde$q_region_size;
#if 0
                __vma_unlink(mm, next, prev);
#endif
                spin_unlock(&mm->page_table_lock);

                mm->map_count--;
                kmem_cache_free(vm_area_cachep, next);
            }
        } else if (next->rde$pq_start_va == new_addr + new_len &&
                   can_vma_merge(next, vma->rde$l_flags) && !(vma->rde$l_flags & VM_SHARED)) {
            /* The region starting right after the new range can absorb it */
            spin_lock(&mm->page_table_lock);
            next->rde$pq_start_va = new_addr;
            spin_unlock(&mm->page_table_lock);
            new_vma = next;
        }
    } else {
        /* new_addr lies beyond the last region: try to extend that one */
        prev = find_vma(current->pcb$l_phd, new_addr - 1);
        if (prev && (prev->rde$pq_start_va + prev->rde$q_region_size) == new_addr &&
            can_vma_merge(prev, vma->rde$l_flags) && !(vma->rde$l_flags & VM_SHARED)) {
            spin_lock(&mm->page_table_lock);
            prev->rde$q_region_size = new_len;
            spin_unlock(&mm->page_table_lock);
            new_vma = prev;
        }
    }

    allocated_vma = 0;
    if (!new_vma) {
        new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!new_vma)
            goto out;
        allocated_vma = 1;
    }

    if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
        if (allocated_vma) {
            *new_vma = *vma;
            new_vma->rde$pq_start_va = new_addr;
            new_vma->rde$q_region_size = new_len;
#if 0
            new_vma->vm_pgoff += (addr - vma->rde$pq_start_va) >> PAGE_SHIFT;
            new_vma->vm_raend = 0;
            if (new_vma->vm_file)
                get_file(new_vma->vm_file);
            if (new_vma->vm_ops && new_vma->vm_ops->open)
                new_vma->vm_ops->open(new_vma);
            insert_vm_struct(current->mm, new_vma);
#endif
            insrde(new_vma, &current->pcb$l_phd->phd$ps_p0_va_list_flink);
        }
        do_munmap(current->mm, addr, old_len);
        current->mm->total_vm += new_len >> PAGE_SHIFT;
        if (new_vma->rde$l_flags & VM_LOCKED) {
            current->mm->locked_vm += new_len >> PAGE_SHIFT;
            make_pages_present(new_vma->rde$pq_start_va,
                new_vma->rde$pq_start_va + new_vma->rde$q_region_size);
        }
        return new_addr;
    }
    if (allocated_vma)
        kmem_cache_free(vm_area_cachep, new_vma);
out:
    return -ENOMEM;
}

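/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and the available
 * virtual memory).  MREMAP_FIXED lets the caller pick the new address
 * and requires MREMAP_MAYMOVE.  Called with the mmap semaphore held for
 * writing.
 */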
unsigned long do_mremap(unsigned long addr,
    unsigned long old_len, unsigned long new_len,
    unsigned long flags, unsigned long new_addr)
{
    struct _rde *vma;
    unsigned long ret = -EINVAL;

    if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
        goto out;

    if (addr & ~PAGE_MASK)
        goto out;

    old_len = PAGE_ALIGN(old_len);
    new_len = PAGE_ALIGN(new_len);

    /* new_addr is only valid if MREMAP_FIXED is specified */
    if (flags & MREMAP_FIXED) {
        if (new_addr & ~PAGE_MASK)
            goto out;
        if (!(flags & MREMAP_MAYMOVE))
            goto out;

        if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
            goto out;

        /*
         * Check if the location we're moving into overlaps the
         * old location at all, and fail if it does.
         */
        if ((new_addr <= addr) && (new_addr + new_len) > addr)
            goto out;

        if ((addr <= new_addr) && (addr + old_len) > new_addr)
            goto out;

        do_munmap(current->mm, new_addr, new_len);
    }

    /*
     * Always allow a shrinking remap: that just unmaps
     * the unnecessary pages.
     */
    ret = addr;
    if (old_len >= new_len) {
        do_munmap(current->mm, addr + new_len, old_len - new_len);
        if (!(flags & MREMAP_FIXED) || (new_addr == addr))
            goto out;
    }

    /*
     * Ok, we need to grow or relocate: check that the old range is
     * covered by a single region and that the various limits allow it.
     */
    ret = -EFAULT;
    vma = find_vma(current->pcb$l_phd, addr);
    if (!vma || vma->rde$pq_start_va > addr)
        goto out;

    if (old_len > (vma->rde$pq_start_va + vma->rde$q_region_size) - addr)
        goto out;
    if (vma->rde$l_flags & VM_DONTEXPAND) {
        if (new_len > old_len)
            goto out;
    }
    if (vma->rde$l_flags & VM_LOCKED) {
        unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
        locked += new_len - old_len;
        ret = -EAGAIN;
        if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
            goto out;
    }
    ret = -ENOMEM;
    if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
        > current->rlim[RLIMIT_AS].rlim_cur)
        goto out;

    if ((vma->rde$l_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
        !(flags & MAP_NORESERVE) &&
        !vm_enough_memory((new_len - old_len) >> PAGE_SHIFT))
        goto out;

    /*
     * old_len extends exactly to the end of the region and we are not
     * being asked to relocate it: try to grow the region in place.
     */
    if (old_len == (vma->rde$pq_start_va + vma->rde$q_region_size) - addr &&
        !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
        (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
        unsigned long max_addr = TASK_SIZE;
        if (vma->rde$ps_va_list_flink)
            max_addr = vma->rde$ps_va_list_flink->rde$pq_start_va;

        if (max_addr - addr >= new_len) {
            int pages = (new_len - old_len) >> PAGE_SHIFT;

            vma->rde$q_region_size = new_len;

            current->mm->total_vm += pages;
            if (vma->rde$l_flags & VM_LOCKED) {
                current->mm->locked_vm += pages;
                make_pages_present(addr + old_len,
                    addr + new_len);
            }
            ret = addr;
            goto out;
        }
    }

    /*
     * We weren't able to just expand or shrink the area,
     * we have to create a new one and move it.
     */
    ret = -ENOMEM;
    if (flags & MREMAP_MAYMOVE) {
        if (!(flags & MREMAP_FIXED)) {
            unsigned long map_flags = 0;
            if (vma->rde$l_flags & VM_SHARED)
                map_flags |= MAP_SHARED;

            new_addr = get_unmapped_area(0, 0, new_len, 0, map_flags);
            ret = new_addr;
            if (new_addr & ~PAGE_MASK)
                goto out;
        }
        ret = move_vma(vma, addr, old_len, new_len, new_addr);
    }
out:
    return ret;
}

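/*
 * System call entry point: take the mmap semaphore for writing and let
 * do_mremap() do the real work.
 */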
asmlinkage unsigned long sys_mremap(unsigned long addr,
    unsigned long old_len, unsigned long new_len,
    unsigned long flags, unsigned long new_addr)
{
    unsigned long ret;

    down_write(&current->mm->mmap_sem);
    ret = do_mremap(addr, old_len, new_len, flags, new_addr);
    up_write(&current->mm->mmap_sem);
    return ret;
}