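/*
 * mprotect.c - sys_mprotect() and helpers.
 *
 * This is the classic Linux mprotect path reworked to operate on
 * VMS-style region descriptor entries (struct _rde), which record a
 * start VA (rde$pq_start_va) and a length (rde$q_region_size) instead
 * of a [vm_start, vm_end) pair.
 */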
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include <ipldef.h>
#include <phddef.h>
#include <rdedef.h>
#include <secdef.h>
#include <misc_routines.h>
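/*
 * Bottom level of the page-table walk: remap every present pte in the
 * span [address, address + size) covered by this pmd entry with the
 * new protection bits.
 */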
static inline void change_pte_range(pmd_t * pmd, unsigned long address,
        unsigned long size, pgprot_t newprot)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                if (pte_present(*pte)) {
                        pte_t entry;

                        /*
                         * Wipe the pte, then install the modified entry so
                         * a hardware walker cannot set dirty/accessed bits
                         * in between. (address is pmd-relative at this
                         * point; ptep_get_and_clear() treats mm/address as
                         * hints on most ports.)
                         */
                        entry = ptep_get_and_clear(current->mm, address, pte);
                        set_pte(pte, pte_modify(entry, newprot));
                }
                address += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}
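/*
 * Middle level: walk the pmd entries that this pud entry covers and
 * hand each one to change_pte_range().
 */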
static inline void change_pmd_range(pud_t * pud, unsigned long address,
        unsigned long size, pgprot_t newprot)
{
        pmd_t * pmd;
        unsigned long end;

        if (pud_none(*pud))
                return;
        if (pud_bad(*pud)) {
                pud_ERROR(*pud);
                pud_clear(pud);
                return;
        }
        pmd = pmd_offset(pud, address);
        /* Clamp to this pud entry's span, not the whole pgd entry. */
        address &= ~PUD_MASK;
        end = address + size;
        if (end > PUD_SIZE)
                end = PUD_SIZE;
        do {
                change_pte_range(pmd, address, end - address, newprot);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
}
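/*
 * Upper level: walk the pud entries under one pgd entry and hand each
 * to change_pmd_range().
 */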
static inline void change_pud_range(pgd_t * pgd, unsigned long address,
        unsigned long size, pgprot_t newprot)
{
        pud_t * pud;
        unsigned long end;

        if (pgd_none(*pgd))
                return;
        if (pgd_bad(*pgd)) {
                pgd_ERROR(*pgd);
                pgd_clear(pgd);
                return;
        }
        pud = pud_offset(pgd, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                change_pmd_range(pud, address, end - address, newprot);
                address = (address + PUD_SIZE) & PUD_MASK;
                pud++;
        } while (address && (address < end));
}
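/*
 * Rewrite the protection of every page in [start, end) of the current
 * address space: flush caches, walk the page tables under the
 * page_table_lock, then flush the TLB for the range.
 */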
static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
{
        pgd_t *dir;
        unsigned long beg = start;

        dir = pgd_offset(current->mm, start);
        flush_cache_range(current->mm, beg, end);
        if (start >= end)
                BUG();
        spin_lock(&current->mm->page_table_lock);
        do {
                change_pud_range(dir, start, end - start, newprot);
                start = (start + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (start && (start < end));
        spin_unlock(&current->mm->page_table_lock);
        flush_tlb_range(current->mm, beg, end);
        return;
}
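/*
 * New protection covers the whole region: merge it into a compatible,
 * adjacent predecessor when possible, otherwise just rewrite the
 * region's flags and protection word in place.
 */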
static inline int mprotect_fixup_all(struct _rde * vma, struct _rde ** pprev,
        int newflags, pgprot_t prot)
{
        struct _rde * prev = *pprev;
        struct mm_struct * mm = current->mm;

        if (prev && (prev->rde$pq_start_va + prev->rde$q_region_size) == vma->rde$pq_start_va &&
            can_vma_merge(prev, newflags) && !(vma->rde$l_flags & VM_SHARED)) {
                spin_lock(&mm->page_table_lock);
                /*
                 * rde$q_region_size is a length, so absorbing vma grows
                 * prev by vma's size.
                 */
                prev->rde$q_region_size += vma->rde$q_region_size;
#if 0
                __vma_unlink(mm, vma, prev);
#endif
                spin_unlock(&mm->page_table_lock);

                kmem_cache_free(vm_area_cachep, vma);
                mm->map_count--;

                return 0;
        }

        spin_lock(&mm->page_table_lock);
        vma->rde$l_flags = newflags;
        vma->rde$r_regprot.regprt$l_region_prot = *(unsigned long *)&prot;
        spin_unlock(&mm->page_table_lock);

        *pprev = vma;

        return 0;
}
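/*
 * New protection covers the head of the region, up to `end`: extend a
 * compatible predecessor over the head, or split the head off into a
 * freshly allocated RDE.
 */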
static inline int mprotect_fixup_start(struct _rde * vma, struct _rde ** pprev,
        unsigned long end,
        int newflags, pgprot_t prot)
{
        struct _rde * n, * prev = *pprev;

#if 0
        panic("fixup_start not implemented yet\n");
#endif

        *pprev = vma;

        if (prev && (prev->rde$pq_start_va + prev->rde$q_region_size) == vma->rde$pq_start_va &&
            can_vma_merge(prev, newflags) && !(vma->rde$l_flags & VM_SHARED)) {

                prev->rde$q_region_size = end - (unsigned long)prev->rde$pq_start_va;
                /* Moving the start up must shrink the length by the same
                 * amount, since the RDE stores a size rather than an end. */
                vma->rde$q_region_size -= end - (unsigned long)vma->rde$pq_start_va;
                vma->rde$pq_start_va = end;

                return 0;
        }
        n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!n)
                return -ENOMEM;
        *n = *vma;
        n->rde$q_region_size = end - (unsigned long)n->rde$pq_start_va;
        n->rde$l_flags = newflags;
        n->rde$r_regprot.regprt$l_region_prot = *(unsigned long *)&prot;
#if 0
        if (n->vm_file)
                get_file(n->vm_file);
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        vma->vm_pgoff += (end - (unsigned long)vma->rde$pq_start_va) >> PAGE_SHIFT;
#endif
        lock_vma_mappings(vma);

        vma->rde$pq_start_va = end;
        vma->rde$q_region_size -= n->rde$q_region_size;

        insrde(n, &current->pcb$l_phd->phd$ps_p0_va_list_flink);

        unlock_vma_mappings(vma);

        return 0;
}
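/*
 * New protection covers the tail of the region, from `start` on: clone
 * the RDE for the tail, shrink the head, and give the tail its own
 * process section table entry so its pages keep mapping the same
 * blocks of the backing section.
 */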
static inline int mprotect_fixup_end(struct _rde * vma, struct _rde ** pprev,
        unsigned long start,
        int newflags, pgprot_t prot)
{
        struct _rde * n;

        n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!n)
                return -ENOMEM;
        *n = *vma;
        n->rde$pq_start_va = start;
        n->rde$l_flags = newflags;
        n->rde$r_regprot.regprt$l_region_prot = *(unsigned long *)&prot;
#if 0
        if (n->vm_file)
                get_file(n->vm_file);
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
#endif
        lock_vma_mappings(vma);

        vma->rde$q_region_size = start - (unsigned long)vma->rde$pq_start_va;
        n->rde$q_region_size -= vma->rde$q_region_size;

        insrde(n, &current->pcb$l_phd->phd$ps_p0_va_list_flink);

        /*
         * The tail now maps a different window of its backing section:
         * clone the process section table (PST) entry, advance the
         * clone's starting virtual block number past the head, and
         * repoint the tail's section-backed ptes at the new PST slot.
         */
        {
                struct _secdef * sec, * pstl;
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;
                unsigned long page = start;
                unsigned long secno;
                unsigned long count;

                pgd = pgd_offset(current->mm, page);
                pud = pud_offset(pgd, page);
                pmd = pmd_offset(pud, page);
                pte = pte_offset(pmd, page);
                /* For a section-backed pte the pfn field holds the PST index. */
                secno = ((*(unsigned long *)pte) & 0xfffff000) >> PAGE_SHIFT;
                pstl = current->pcb$l_phd->phd$l_pst_base_offset;
                sec = &pstl[current->pcb$l_phd->phd$l_pst_free++];
                *sec = pstl[secno];
                sec->sec$l_vbn += (vma->rde$q_region_size >> PAGE_SHIFT);
                for (count = start; count < (start + n->rde$q_region_size); count += PAGE_SIZE) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;
                        pte_t *pte;
                        pgd = pgd_offset(current->mm, count);
                        pud = pud_offset(pgd, count);
                        pmd = pmd_offset(pud, count);
                        pte = pte_offset(pmd, count);
                        /* 0xc00 in the low bits appears to tag a
                         * section-backed pte. */
                        if (((*(unsigned long *)pte) & 0xfff) == 0xc00)
                                *(unsigned long *)pte = ((*(unsigned long *)pte) & 0xfff) |
                                        ((current->pcb$l_phd->phd$l_pst_free - 1) << PAGE_SHIFT);
                }
        }

        unlock_vma_mappings(vma);

        *pprev = n;

        return 0;
}
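/*
 * New protection covers an interior span, so the region would have to
 * split in three (left, middle, right). Not implemented: the function
 * panics before doing any of it.
 */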
static inline int mprotect_fixup_middle(struct _rde * vma, struct _rde ** pprev,
        unsigned long start, unsigned long end,
        int newflags, pgprot_t prot)
{
        struct _rde * left, * right;

        panic("fixup_middle not implemented yet\n");

        left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!left)
                return -ENOMEM;
        right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!right) {
                kmem_cache_free(vm_area_cachep, left);
                return -ENOMEM;
        }
        *left = *vma;
        *right = *vma;
        left->rde$q_region_size = start - (unsigned long)left->rde$pq_start_va;
        right->rde$pq_start_va = end;
        /* right keeps only the tail: drop the part of the copied length
         * that now belongs to left and the middle piece. */
        right->rde$q_region_size -= end - (unsigned long)vma->rde$pq_start_va;
#if 0
        right->vm_pgoff += (right->rde$pq_start_va - left->rde$pq_start_va) >> PAGE_SHIFT;
        left->vm_raend = 0;
        right->vm_raend = 0;
        if (vma->vm_file)
                atomic_add(2, &vma->vm_file->f_count);
        if (vma->vm_ops && vma->vm_ops->open) {
                vma->vm_ops->open(left);
                vma->vm_ops->open(right);
        }
        vma->vm_pgoff += (start - vma->rde$pq_start_va) >> PAGE_SHIFT;
        vma->vm_raend = 0;
#endif
        vma->rde$r_regprot.regprt$l_region_prot = *(unsigned long *)&prot;
        lock_vma_mappings(vma);

        vma->rde$pq_start_va = start;
        vma->rde$q_region_size = end - start;
        vma->rde$l_flags = newflags;

        insrde(left, &current->pcb$l_phd->phd$ps_p0_va_list_flink);
        insrde(right, &current->pcb$l_phd->phd$ps_p0_va_list_flink);

        unlock_vma_mappings(vma);

        *pprev = right;

        return 0;
}
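/*
 * Dispatch on how [start, end) lines up with the region (whole region,
 * head, tail, or middle), then rewrite the affected ptes.
 */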
static int mprotect_fixup(struct _rde * vma, struct _rde ** pprev,
        unsigned long start, unsigned long end, unsigned int newflags)
{
        pgprot_t newprot;
        int error;

        if ((newflags >> 8) == (vma->rde$l_flags >> 8)) {
                *pprev = vma;
                return 0;
        }
        newprot = protection_map[(newflags >> 8) & 0xf];
        if (start == vma->rde$pq_start_va) {
                if (end == (vma->rde$pq_start_va + vma->rde$q_region_size))
                        error = mprotect_fixup_all(vma, pprev, newflags, newprot);
                else
                        error = mprotect_fixup_start(vma, pprev, end, newflags, newprot);
        } else if (end == (vma->rde$pq_start_va + vma->rde$q_region_size))
                error = mprotect_fixup_end(vma, pprev, start, newflags, newprot);
        else
                error = mprotect_fixup_middle(vma, pprev, start, end, newflags, newprot);

        if (error)
                return error;

        change_protection(start, end, newprot);
        return 0;
}
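/*
 * mprotect(2): validate the page-aligned range and prot bits, then
 * walk the RDE list applying mprotect_fixup() region by region,
 * finally merging a compatible successor into prev.
 */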
asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
        unsigned long nstart, end, tmp;
        struct _rde * vma, * next, * prev;
        int error = -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
                return -EINVAL;
        if (end == start)
                return 0;

        down_write(&current->mm->mmap_sem);

        vma = find_vma_prev(current->pcb$l_phd, start, &prev);

        error = -EFAULT;
        if (!vma || vma->rde$pq_start_va > start)
                goto out;

        for (nstart = start ; ; ) {
                unsigned int newflags;
                int last = 0;

                /* Here we know that rde$pq_start_va <= nstart < the end of
                 * the region. */

                newflags = (prot << 8) | (vma->rde$l_flags & ~((PROT_READ | PROT_WRITE | PROT_EXEC) << 8));
                if ((newflags & ~(newflags >> 4)) & 0xf) {
                        error = -EACCES;
                        goto out;
                }

                if ((vma->rde$pq_start_va + vma->rde$q_region_size) > end) {
                        error = mprotect_fixup(vma, &prev, nstart, end, newflags);
                        goto out;
                }
                if ((vma->rde$pq_start_va + vma->rde$q_region_size) == end)
                        last = 1;

                tmp = (vma->rde$pq_start_va + vma->rde$q_region_size);
                next = vma->rde$ps_va_list_flink;
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
                if (last)
                        break;
                nstart = tmp;
                vma = next;
                if (!vma || vma->rde$pq_start_va != nstart) {
                        error = -EFAULT;
                        goto out;
                }
        }
        if (next && (prev->rde$pq_start_va + prev->rde$q_region_size) == next->rde$pq_start_va &&
            can_vma_merge(next, prev->rde$l_flags) && !(prev->rde$l_flags & VM_SHARED)) {
                /* Absorb next: grow prev's length by next's. */
                prev->rde$q_region_size += next->rde$q_region_size;
                kmem_cache_free(vm_area_cachep, next);
        }
out:
        up_write(&current->mm->mmap_sem);
        return error;
}