#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/sched.h>
#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/swap.h>
#include <linux/rbtree.h>

extern unsigned long max_mapnr;
extern unsigned long num_physpages;
extern unsigned long num_mappedpages;
extern void * high_memory;
extern int page_cluster;

extern struct list_head active_list;
extern struct list_head inactive_list;

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>

#include <pfndef.h>
#include <rdedef.h>
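
/*
 * In this port the VMS-style region descriptor entry (struct _rde, pulled in
 * from rdedef.h above) plays the role that vm_area_struct plays in stock
 * Linux.  The define below lets generic code keep using the old type name.
 */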
#define vm_area_struct _rde

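/*
 * Region (vma) flags, kept in rde_l_flags.  Note that, compared with stock
 * Linux, the bit values below are shifted left by 8; VM_STACK_FLAGS is
 * composed accordingly (VM_READ | VM_WRITE | VM_EXEC | VM_MAYREAD |
 * VM_MAYWRITE | VM_MAYEXEC | VM_GROWSDOWN = 0x00017700).
 */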
#define VM_READ 0x00000100
#define VM_WRITE 0x00000200
#define VM_EXEC 0x00000400
#define VM_SHARED 0x00000800

#define VM_MAYREAD 0x00001000
#define VM_MAYWRITE 0x00002000
#define VM_MAYEXEC 0x00004000
#define VM_MAYSHARE 0x00008000

#define VM_GROWSDOWN 0x00010000
#define VM_GROWSUP 0x00020000
#define VM_SHM 0x00000000
#define VM_PFNMAP 0x00040000
#define VM_DENYWRITE 0x00080000

#define VM_EXECUTABLE 0x00100000
#define VM_LOCKED 0x00200000
#define VM_IO 0x00400000

#define VM_SEQ_READ 0x00800000
#define VM_RAND_READ 0x01000000

#define VM_DONTCOPY 0x02000000
#define VM_DONTEXPAND 0x04000000
#define VM_RESERVED 0x08000000

#define VM_STACK_FLAGS 0x00017700

#define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v) (v)->rde_l_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v) (!((v)->rde_l_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v) ((v)->rde_l_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v) ((v)->rde_l_flags & VM_RAND_READ)

extern int vm_min_readahead;
extern int vm_max_readahead;

extern pgprot_t protection_map[16];

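/*
 * These are the virtual MM functions: opening of an area, closing and
 * unmapping it (needed to keep files on disk up to date, etc.), and a
 * pointer to the function called when a no-page exception occurs.
 */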
struct vm_operations_struct {
        void (*open)(struct _rde * area);
        void (*close)(struct _rde * area);
        struct page * (*nopage)(struct _rde * area, unsigned long address, int unused);
};
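/*
 * Page reference counting.  get_page()/put_page() operate on the atomic
 * "count" field of struct page; put_page() here simply frees the page via
 * the zero-order __free_page() path, and put_page_testzero() returns true
 * when the count drops to zero.
 */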
#define get_page(p) atomic_inc(&(p)->count)
#define put_page(p) __free_page(p)
#define put_page_testzero(p) atomic_dec_and_test(&(p)->count)
#define page_count(p) atomic_read(&(p)->count)
#define set_page_count(p,v) atomic_set(&(p)->count, v)
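/*
 * Page flags live in the pfn_l_page_state field of struct page (pfndef.h).
 * In this port most of the stock Linux flag bits and their accessors are
 * compiled out with #if 0; only PG_slab and PG_reserved (and their
 * accessors below) are live, and PageLocked() is stubbed out to 0.
 */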
#if 0
#define PG_locked 3
#define PG_error 20
#define PG_referenced 21
#define PG_uptodate 22
#define PG_dirty 13
#define PG_unused 23
#define PG_lru 24
#define PG_active 25
#endif
#define PG_slab 26
#if 0
#define PG_skip 27

#define PG_checked 28
#define PG_arch_1 29
#endif
#define PG_reserved 30
#if 0
#define PG_launder 31

#define UnlockPage(page) unlock_page(page)
#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->pfn_l_page_state)
#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->pfn_l_page_state)
#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->pfn_l_page_state)
#define PageDirty(page) test_bit(PG_dirty, &(page)->pfn_l_page_state)
#define SetPageDirty(page) set_bit(PG_dirty, &(page)->pfn_l_page_state)
#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->pfn_l_page_state)
#define PageLocked(page) test_bit(PG_locked, &(page)->pfn_l_page_state)
#define LockPage(page) set_bit(PG_locked, &(page)->pfn_l_page_state)
#define TryLockPage(page) test_and_set_bit(PG_locked, &(page)->pfn_l_page_state)
#define PageChecked(page) test_bit(PG_checked, &(page)->pfn_l_page_state)
#define SetPageChecked(page) set_bit(PG_checked, &(page)->pfn_l_page_state)
#define PageLaunder(page) test_bit(PG_launder, &(page)->pfn_l_page_state)
#define SetPageLaunder(page) set_bit(PG_launder, &(page)->pfn_l_page_state)

extern void FASTCALL(set_page_dirty(struct page *));

#define PageError(page) test_bit(PG_error, &(page)->pfn_l_page_state)
#define SetPageError(page) set_bit(PG_error, &(page)->pfn_l_page_state)
#define ClearPageError(page) clear_bit(PG_error, &(page)->pfn_l_page_state)
#define PageReferenced(page) test_bit(PG_referenced, &(page)->pfn_l_page_state)
#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->pfn_l_page_state)
#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->pfn_l_page_state)
#define PageTestandClearReferenced(page) test_and_clear_bit(PG_referenced, &(page)->pfn_l_page_state)
#else
#define PageLocked(x) (0)
#endif
#define PageSlab(page) test_bit(PG_slab, &(page)->pfn_l_page_state)
#define PageSetSlab(page) set_bit(PG_slab, &(page)->pfn_l_page_state)
#define PageClearSlab(page) clear_bit(PG_slab, &(page)->pfn_l_page_state)
#define PageReserved(page) test_bit(PG_reserved, &(page)->pfn_l_page_state)
#if 0

#define PageActive(page) test_bit(PG_active, &(page)->pfn_l_page_state)
#define SetPageActive(page) set_bit(PG_active, &(page)->pfn_l_page_state)
#define ClearPageActive(page) clear_bit(PG_active, &(page)->pfn_l_page_state)

#define PageLRU(page) test_bit(PG_lru, &(page)->pfn_l_page_state)
#define TestSetPageLRU(page) test_and_set_bit(PG_lru, &(page)->pfn_l_page_state)
#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->pfn_l_page_state)

#ifdef CONFIG_HIGHMEM
#define PageHighMem(page) test_bit(PG_highmem, &(page)->pfn_l_page_state)
#else
#define PageHighMem(page) 0
#endif

#endif
#define SetPageReserved(page) set_bit(PG_reserved, &(page)->pfn_l_page_state)
#define ClearPageReserved(page) clear_bit(PG_reserved, &(page)->pfn_l_page_state)
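/*
 * Error return values for the ->nopage handler: NOPAGE_SIGBUS makes the
 * fault raise SIGBUS, NOPAGE_OOM makes it fail with an out-of-memory error.
 */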
#define NOPAGE_SIGBUS (NULL)
#define NOPAGE_OOM ((struct page *) (-1))

extern mem_map_t * mem_map;

#define __page_address(page) ({ PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT); })
#define page_address(page) __page_address(page)

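/*
 * Page allocation.  _alloc_pages()/__alloc_pages() are the low-level entry
 * points; alloc_pages() below rejects orders >= MAX_ORDER, and the
 * __get_free_pages() family returns kernel virtual addresses rather than
 * struct page pointers.  A minimal usage sketch (illustrative only):
 *
 *      struct page *page = alloc_page(GFP_KERNEL);
 *      if (page) {
 *              void *p = (void *) page_address(page);
 *              memset(p, 0, PAGE_SIZE);
 *              __free_page(page);
 *      }
 *
 *      unsigned long addr = __get_free_pages(GFP_KERNEL, 2);  (4 pages)
 *      if (addr)
 *              free_pages(addr, 2);
 */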
extern struct page * FASTCALL(_alloc_pages(unsigned int gfp_mask, unsigned int order));
extern struct page * FASTCALL(__alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist));
extern struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order);

#ifdef __arch_um__
#ifndef HAVE_ARCH_VALIDATE
static inline struct page *arch_validate(struct page *page,
                                         unsigned int gfp_mask, int order)
{
        return(page);
}
#endif
#endif

static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order)
{
        /* Refuse orders the buddy allocator cannot satisfy. */
        if (order >= MAX_ORDER)
                return NULL;
#ifndef __arch_um__
        return _alloc_pages(gfp_mask, order);
#else
        return arch_validate(_alloc_pages(gfp_mask, order), gfp_mask, order);
#endif
}

#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

extern unsigned long FASTCALL(__get_free_pages(unsigned int gfp_mask, unsigned int order));
extern unsigned long FASTCALL(get_zeroed_page(unsigned int gfp_mask));

#define __get_free_page(gfp_mask) \
        __get_free_pages((gfp_mask),0)

#define __get_dma_pages(gfp_mask, order) \
        __get_free_pages((gfp_mask) | GFP_DMA,(order))

#define get_free_page get_zeroed_page

extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)

extern void show_free_areas(void);
extern void show_free_areas_node(pg_data_t *pgdat);

extern void clear_page_tables(struct mm_struct *, unsigned long, int);

extern int fail_writepage(struct page *);
struct page * shmem_nopage(struct _rde * vma, unsigned long address, int unused);
struct file *shmem_file_setup(char * name, loff_t size);
extern void shmem_lock(struct file * file, int lock);
extern int shmem_zero_setup(struct _rde *);

extern void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct _rde *vma);
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);

extern int vmtruncate(struct _fcb * inode, loff_t offset);
extern pud_t *FASTCALL(__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address));
extern pte_t *FASTCALL(pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
extern int handle_mm_fault(struct mm_struct *mm, struct _rde *vma, unsigned long address, int write_access);
extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char * src, unsigned long dst, int len);
extern int ptrace_attach(struct task_struct *tsk);
extern int ptrace_detach(struct task_struct *, unsigned int);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, int kill);

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
                   int len, int write, int force, struct page **pages, struct _rde **vmas);
void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);

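/*
 * pud_alloc()/pmd_alloc() allocate the intermediate page-table levels on
 * demand: when the upper-level entry is empty the slow paths __pud_alloc()/
 * __pmd_alloc() are called, otherwise the existing table is simply located
 * with pud_offset()/pmd_offset().
 */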
static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
        if (pud_none(*pud))
                return __pmd_alloc(mm, pud, address);
        return pmd_offset(pud, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
        if (pgd_none(*pgd))
                return __pud_alloc(mm, pgd, address);
        return pud_offset(pgd, address);
}

extern int pgt_cache_water[2];
extern int check_pgt_cache(void);

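/*
 * Page-table locking helpers.  There is no split page-table lock here:
 * pte_lockptr() always resolves to the per-mm page_table_lock, and the
 * map_lock/unmap_unlock macros below pair a pte_offset_map() with taking
 * and releasing that spinlock.  Typical (illustrative) usage:
 *
 *      spinlock_t *ptl;
 *      pte_t *pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *      ... examine or modify *pte ...
 *      pte_unmap_unlock(pte, ptl);
 */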
#define pte_lock_init(page) do {} while (0)
#define pte_lock_deinit(page) do {} while (0)
#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})

#define pte_offset_map_lock(mm, pmd, address, ptlp) \
({ \
        spinlock_t *__ptl = pte_lockptr(mm, pmd); \
        pte_t *__pte = pte_offset_map(pmd, address); \
        *(ptlp) = __ptl; \
        spin_lock(__ptl); \
        __pte; \
})

#define pte_unmap_unlock(pte, ptl) do { \
        spin_unlock(ptl); \
        pte_unmap(pte); \
} while (0)

#define pte_alloc_map(mm, pmd, address) \
        ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address)) ? \
                NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
        ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address)) ? \
                NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address) \
        ((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address)) ? \
                NULL : pte_offset_kernel(pmd, address))

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
                unsigned long * zones_size, unsigned long zone_start_paddr,
                unsigned long *zholes_size);
extern void mem_init(void);
extern void show_mem(void);
extern void si_meminfo(struct sysinfo * val);
extern void swapin_readahead(swp_entry_t);

extern struct address_space swapper_space;
#define PageSwapCache(page) ((page)->mapping == &swapper_space)

static inline int is_page_cache_freeable(struct page * page)
{
        return page_count(page) == 1;
}

extern int can_share_swap_page(struct page *);
extern int remove_exclusive_swap_page(struct page *);

extern void __free_pte(pte_t);

extern void lock_vma_mappings(struct _rde *);
extern void unlock_vma_mappings(struct _rde *);
extern void insert_vm_struct(struct mm_struct *, struct _rde *);
extern void __insert_vm_struct(struct mm_struct *, struct _rde *);
extern void build_mmap_rb(struct mm_struct *);
extern void exit_mmap(struct mm_struct *);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff);

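/*
 * do_mmap() is a byte-offset convenience wrapper around do_mmap_pgoff():
 * it rejects offsets that are not page aligned or that would overflow when
 * the page-aligned length is added, then converts the offset to a page
 * number.
 */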
static inline unsigned long do_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        unsigned long ret = -EINVAL;
        if ((offset + PAGE_ALIGN(len)) < offset)
                goto out;
        if (!(offset & ~PAGE_MASK))
                ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
out:
        return ret;
}

extern int do_munmap(struct mm_struct *, unsigned long, size_t);

extern unsigned long do_brk(unsigned long, unsigned long);

#if 0
static inline void __vma_unlink(struct mm_struct * mm, struct _rde * vma, struct _rde * prev)
{
        prev->vm_next = vma->vm_next;
        rb_erase(&vma->vm_rb, &mm->mm_rb);
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
}
#endif

static inline int can_vma_merge(struct _rde * vma, unsigned long rde_l_flags)
{
#if 0
        if (!vma->vm_file && vma->rde_l_flags == rde_l_flags)
                return 1;
        else
                return 0;
#endif
        return 0;
}

struct zone_t;

extern void remove_inode_page(struct page *);
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);

extern int filemap_sync(struct _rde *, unsigned long, size_t, unsigned int);
extern struct page *filemap_nopage(struct _rde *, unsigned long, int);

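/*
 * GFP bitmasks.  The low bits select the zone (DMA/highmem); the action
 * modifiers say whether the allocator may sleep (__GFP_WAIT), dip into
 * emergency reserves (__GFP_HIGH), start low-level I/O (__GFP_IO,
 * __GFP_HIGHIO) or call into filesystems (__GFP_FS).  The composite GFP_*
 * masks below are the combinations normal code should use.
 */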
#define __GFP_DMA 0x01
#define __GFP_HIGHMEM 0x02

#define __GFP_WAIT 0x10
#define __GFP_HIGH 0x20
#define __GFP_IO 0x40
#define __GFP_HIGHIO 0x80
#define __GFP_FS 0x100

#define GFP_NOHIGHIO (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
#define GFP_NOIO (__GFP_HIGH | __GFP_WAIT)
#define GFP_NOFS (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO)
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_USER ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_HIGHUSER ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
#define GFP_KERNEL (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_NFS (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_KSWAPD ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)

#define GFP_DMA __GFP_DMA

static inline unsigned int pf_gfp_mask(unsigned int gfp_mask)
{
        /* A PF_NOIO task must not recurse into block I/O or the filesystems. */
        if (current->flags & PF_NOIO)
                gfp_mask &= ~(__GFP_IO | __GFP_HIGHIO | __GFP_FS);

        return gfp_mask;
}

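/*
 * Grow a VM_GROWSDOWN region downwards so that it covers 'address'.
 * The new start is page aligned and the resulting size is checked
 * against RLIMIT_STACK before rde_pq_start_va is moved.
 */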
static inline int expand_stack(struct _rde * vma, unsigned long address)
{
        unsigned long grow;

        address &= PAGE_MASK;

        /* grow is computed as in stock Linux, but no per-mm accounting is updated here. */
        grow = (unsigned long)(vma->rde_pq_start_va - address) >> PAGE_SHIFT;
        if ((vma->rde_pq_start_va + vma->rde_q_region_size) - address > current->rlim[RLIMIT_STACK].rlim_cur) {
                return -ENOMEM;
        }
        vma->rde_pq_start_va = address;

        return 0;
}
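/*
 * find_vma() looks up the first region whose end lies above addr (NULL if
 * none); find_vma_prev() additionally returns the preceding region via
 * *pprev.
 */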
extern struct _rde * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct _rde * find_vma_prev(struct mm_struct * mm, unsigned long addr,
        struct _rde **pprev);

static inline struct _rde * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
        struct _rde * vma = find_vma(mm,start_addr);

        if (vma && end_addr <= vma->rde_pq_start_va)
                vma = NULL;
        return vma;
}

extern struct _rde *find_extend_vma(struct mm_struct *mm, unsigned long addr);

#endif /* __KERNEL__ */

#endif /* _LINUX_MM_H */