#include <linux/config.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

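/*
 * The kernel-wide list of allocated virtual areas, protected by
 * vmlist_lock.
 */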
rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct * vmlist;

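/*
 * Clear the PTEs covering the given range within one pmd entry and
 * release any present, valid, non-reserved pages they mapped.
 */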
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t page;
		page = ptep_get_and_clear(&init_mm, address, pte);
		address += PAGE_SIZE;
		pte++;
		if (pte_none(page))
			continue;
		if (pte_present(page)) {
			struct page *ptpage = pte_page(page);
			if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
				__free_page(ptpage);
			continue;
		}
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (address < end);
}

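/*
 * Walk the pmd entries under one pud entry, freeing each PTE table's
 * pages in the range.
 */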
static inline void free_area_pmd(pud_t * dir, unsigned long address, unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;

	if (pud_none(*dir))
		return;
	if (pud_bad(*dir)) {
		pud_ERROR(*dir);
		pud_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	address &= ~PUD_MASK;
	end = address + size;
	if (end > PUD_SIZE)
		end = PUD_SIZE;
	do {
		free_area_pte(pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}

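/*
 * Walk the pud entries under one pgd entry, freeing the page tables
 * beneath them.
 */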
static inline void free_area_pud(pgd_t * dir, unsigned long address, unsigned long size)
{
	pud_t * pud;
	unsigned long end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pud = pud_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		free_area_pmd(pud, address, end - address);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address < end);
}

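/*
 * Tear down the kernel page-table mappings for [address, address+size)
 * and flush the TLB afterwards.
 */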
void vmfree_area_pages(unsigned long address, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = address + size;

	dir = pgd_offset_k(address);
	flush_cache_all();
	do {
		free_area_pud(dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
}

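/*
 * Allocate one page per PTE slot in the range and install it.  The
 * page_table_lock is dropped around alloc_page(), which may sleep.
 */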
static inline int alloc_area_pte (pte_t * pte, unsigned long address,
			unsigned long size, int gfp_mask, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		struct page * page;
		spin_unlock(&init_mm.page_table_lock);
		page = alloc_page(gfp_mask);
		spin_lock(&init_mm.page_table_lock);
		if (!pte_none(*pte))
			printk(KERN_ERR "alloc_area_pte: page already exists\n");
		if (!page)
			return -ENOMEM;
		set_pte(pte, mk_pte(page, prot));
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
	return 0;
}

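/*
 * Populate the PTE tables for every pmd entry in the range, allocating
 * them as needed.
 */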
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, int gfp_mask, pgprot_t prot)
{
	unsigned long end;

	address &= ~PUD_MASK;
	end = address + size;
	if (end > PUD_SIZE)
		end = PUD_SIZE;
	do {
		pte_t * pte = pte_alloc(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		if (alloc_area_pte(pte, address, end - address, gfp_mask, prot))
			return -ENOMEM;
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}

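/*
 * Populate the pmd tables for every pud entry in the range, allocating
 * them as needed.
 */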
static inline int alloc_area_pud(pud_t * pud, unsigned long address, unsigned long size, int gfp_mask, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pmd_t * pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot))
			return -ENOMEM;
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address < end);
	return 0;
}

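/*
 * Build kernel page-table mappings for [address, address+size),
 * holding init_mm's page_table_lock across the walk.
 */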
inline int vmalloc_area_pages (unsigned long address, unsigned long size,
			       int gfp_mask, pgprot_t prot)
{
	pgd_t * dir;
	unsigned long end = address + size;
	int ret;

	dir = pgd_offset_k(address);
	spin_lock(&init_mm.page_table_lock);
	do {
		pud_t *pud;

		pud = pud_alloc(&init_mm, dir, address);
		ret = -ENOMEM;
		if (!pud)
			break;

		ret = -ENOMEM;
		if (alloc_area_pud(pud, address, end - address, gfp_mask, prot))
			break;

		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;

		ret = 0;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_cache_all();
	return ret;
}

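/*
 * Find a free range in [VMALLOC_START, VMALLOC_END) large enough for
 * size bytes plus a one-page guard hole, and link a new vm_struct for
 * it into the address-sorted vmlist.
 */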
struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	size += PAGE_SIZE;
	addr = VMALLOC_START;
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long) tmp->addr)
			break;
		addr = tmp->size + (unsigned long) tmp->addr;
		if (addr > VMALLOC_END-size)
			goto out;
	}
	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);
	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	return NULL;
}

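/*
 * Release the area starting at addr: unlink its vm_struct from vmlist,
 * free the mapped pages and page tables, then free the descriptor.
 */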
void vfree(void * addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	if ((PAGE_SIZE-1) & (unsigned long) addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
			write_unlock(&vmlist_lock);
			kfree(tmp);
			return;
		}
	}
	write_unlock(&vmlist_lock);
	printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
}

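/*
 * Allocate size bytes of page-aligned, virtually contiguous kernel
 * memory with the given allocation mask and protection bits.
 */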
void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
{
	void * addr;
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages) {
		BUG();
		return NULL;
	}
	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		return NULL;
	addr = area->addr;
	if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask, prot)) {
		vfree(addr);
		return NULL;
	}
	return addr;
}

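/*
 * Copy count bytes from the vmalloc address range at addr into buf,
 * zero-filling any gaps between areas (used for /dev/kmem-style reads).
 */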
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

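/*
 * Copy count bytes from buf into the vmalloc address range at addr,
 * skipping (without writing) bytes that fall outside any mapped area.
 */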
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}