00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024 #include <linux/config.h>
00025 #include <linux/module.h>
00026 #include <linux/init.h>
00027 #include <linux/devfs_fs_kernel.h>
00028 #include <linux/fs.h>
00029 #include <linux/mm.h>
00030 #include <linux/file.h>
00031 #include <linux/swap.h>
00032 #include <linux/pagemap.h>
00033 #include <linux/string.h>
00034 #include <linux/locks.h>
00035 #include <linux/smp_lock.h>
00036
00037 #include <asm/uaccess.h>
00038
00039
00040 #define TMPFS_MAGIC 0x01021994
00041
00042 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
00043
00044 #define SHMEM_SB(sb) (&sb->u.shmem_sb)
00045
00046 static struct super_operations shmem_ops;
00047 static struct address_space_operations shmem_aops;
00048 static struct file_operations shmem_file_operations;
00049 static struct inode_operations shmem_inode_operations;
00050 static struct file_operations shmem_dir_operations;
00051 static struct inode_operations shmem_dir_inode_operations;
00052 static struct vm_operations_struct shmem_vm_ops;
00053
00054 LIST_HEAD (shmem_inodes);
00055 static spinlock_t shmem_ilock = SPIN_LOCK_UNLOCKED;
00056 atomic_t shmem_nrpages = ATOMIC_INIT(0);
00057
00058 #define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
00059
00060
00061
00062
00063
00064
00065
00066
00067
00068
00069
00070
00071
00072
00073
00074
00075
00076
00077
00078
00079
/*
 * Re-derive the inode's block charge from its real footprint (resident
 * page-cache pages plus swapped-out pages) and return any surplus
 * blocks to the superblock's free pool.
 *
 * Caller must hold the shmem_inode_info lock so that nrpages/swapped
 * cannot change underneath us (all callers in this file do).
 */
static void shmem_recalc_inode(struct _fcb * inode)
{
        unsigned long freed;

        /* blocks currently charged minus pages actually held */
        freed = (inode->i_blocks/BLOCKS_PER_PAGE) -
                (inode->i_mapping->nrpages + SHMEM_I(inode)->swapped);
        if (freed){
                struct shmem_sb_info * sbinfo = SHMEM_SB(inode->i_sb);
                inode->i_blocks -= freed*BLOCKS_PER_PAGE;
                spin_lock (&sbinfo->stat_lock);
                sbinfo->free_blocks += freed;
                spin_unlock (&sbinfo->stat_lock);
        }
}
00094
00095
00096
00097
00098
00099
00100
00101
00102
00103
00104
00105
00106
00107
00108
00109
00110
00111
00112
00113
00114
00115
00116
00117
00118
00119
00120
00121
00122
00123
00124
00125
00126
00127
00128
00129
00130
00131
00132
00133
00134
00135
00136 #define SHMEM_MAX_BLOCKS (SHMEM_NR_DIRECT + ENTRIES_PER_PAGE * ENTRIES_PER_PAGE/2*(ENTRIES_PER_PAGE+1))
00137
/*
 * Return a pointer to the swap-entry slot for page @index of @info,
 * filling in missing indirection levels on the way down.
 *
 * Layout: the first SHMEM_NR_DIRECT entries live in info->i_direct.
 * Beyond those, info->i_indirect points to a page whose first half
 * holds pointers to pages of swap entries, and whose second half holds
 * pointers to doubly-indirect pages (each full of pointers to entry
 * pages).
 *
 * @page is a zeroed page the caller offers for a missing level.  Each
 * call consumes at most one offered page; installing a level still
 * returns ERR_PTR(-ENOMEM), so the caller must loop, allocating a new
 * page per pass, until every level exists (see shmem_alloc_entry()).
 * With @page == 0 nothing is installed and lookup simply fails.
 */
static swp_entry_t * shmem_swp_entry (struct shmem_inode_info *info, unsigned long index, unsigned long page)
{
        unsigned long offset;
        void **dir;

        /* Fast path: slot lives in the inode's direct array. */
        if (index < SHMEM_NR_DIRECT)
                return info->i_direct+index;

        index -= SHMEM_NR_DIRECT;
        offset = index % ENTRIES_PER_PAGE;      /* slot within the entry page */
        index /= ENTRIES_PER_PAGE;              /* which entry page */

        if (!info->i_indirect) {
                /* consume @page (possibly 0) as the top indirect page */
                info->i_indirect = (void *) page;
                return ERR_PTR(-ENOMEM);
        }

        dir = info->i_indirect + index;
        if (index >= ENTRIES_PER_PAGE/2) {
                /* second half: route through a doubly-indirect page */
                index -= ENTRIES_PER_PAGE/2;
                dir = info->i_indirect + ENTRIES_PER_PAGE/2
                        + index/ENTRIES_PER_PAGE;
                index %= ENTRIES_PER_PAGE;

                if(!*dir) {
                        /* consume @page as the doubly-indirect page */
                        *dir = (void *) page;

                        /* caller allocates another page and retries */
                        return ERR_PTR(-ENOMEM);
                }
                dir = ((void **)*dir) + index;
        }
        if (!*dir) {
                if (!page)
                        return ERR_PTR(-ENOMEM);
                /* consume @page as the final page of swap entries */
                *dir = (void *)page;
        }
        return ((swp_entry_t *)*dir) + offset;
}
00177
00178
00179
00180
00181
00182
00183
00184
/*
 * Look up — creating indirection levels as needed — the swap-entry
 * slot for page @index of @info.
 *
 * Repeatedly offers shmem_swp_entry() a zeroed page until all missing
 * levels are installed.  Also grows info->next_index so truncation
 * knows how far to scan.
 *
 * Returns the slot pointer, ERR_PTR(-EFBIG) for an index beyond the
 * maximum file size, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static inline swp_entry_t * shmem_alloc_entry (struct shmem_inode_info *info, unsigned long index)
{
        unsigned long page = 0;
        swp_entry_t * res;

        if (index >= SHMEM_MAX_BLOCKS)
                return ERR_PTR(-EFBIG);

        if (info->next_index <= index)
                info->next_index = index + 1;

        /* each pass either consumes the offered page or finishes */
        while ((res = shmem_swp_entry(info,index,page)) == ERR_PTR(-ENOMEM)) {
                page = get_zeroed_page(GFP_USER);
                if (!page)
                        break;
        }
        return res;
}
00203
00204
00205
00206
00207
00208
00209
00210 static int shmem_free_swp(swp_entry_t *dir, unsigned int count)
00211 {
00212 swp_entry_t *ptr, entry;
00213 int freed = 0;
00214
00215 for (ptr = dir; ptr < dir + count; ptr++) {
00216 if (!ptr->val)
00217 continue;
00218 entry = *ptr;
00219 *ptr = (swp_entry_t){0};
00220 freed++;
00221 #if 0
00222 free_swap_and_cache(entry);
00223 #endif
00224 }
00225 return freed;
00226 }
00227
00228
00229
00230
00231
00232
00233
00234
00235
00236
00237
00238
/*
 * Free swap entries from an array of entry-page pointers.
 *
 * @dir points at the pointer to the array page itself (so the page can
 * be released and the pointer cleared when it empties), @start is the
 * first entry index to free, @len the total entries the array covers.
 *
 * The first partially-truncated entry page keeps its head (entries
 * below @start % ENTRIES_PER_PAGE); every fully-cleared entry page is
 * freed and its pointer zeroed.  When @start is 0 the array page goes
 * away as well.  Returns the number of swap entries released.
 */
static inline unsigned long
shmem_truncate_direct(swp_entry_t *** dir, unsigned long start, unsigned long len) {
        swp_entry_t **last, **ptr;
        unsigned long off, freed = 0;

        if (!*dir)
                return 0;

        last = *dir + (len + ENTRIES_PER_PAGE-1) / ENTRIES_PER_PAGE;
        off = start % ENTRIES_PER_PAGE;         /* offset within first page */

        for (ptr = *dir + start/ENTRIES_PER_PAGE; ptr < last; ptr++) {
                if (!*ptr) {
                        off = 0;
                        continue;
                }

                if (!off) {
                        /* whole entry page goes away */
                        freed += shmem_free_swp(*ptr, ENTRIES_PER_PAGE);
                        free_page ((unsigned long) *ptr);
                        *ptr = 0;
                } else {
                        /* first page: keep entries below the cut */
                        freed += shmem_free_swp(*ptr+off,ENTRIES_PER_PAGE-off);
                        off = 0;
                }
        }

        if (!start) {
                free_page((unsigned long) *dir);
                *dir = 0;
        }
        return freed;
}
00272
00273
00274
00275
00276
00277
00278
00279
00280
00281
/*
 * Free one "region" of swap entries above @index and shrink
 * info->next_index accordingly.  A region is the direct array, the
 * singly-indirect area, or one doubly-indirect page's worth of
 * entries; the caller (shmem_truncate()) loops, peeling regions from
 * the top, until next_index has come down to @index.
 *
 * Returns the number of swap entries released in this pass.
 * Caller holds info->lock.
 */
static inline unsigned long
shmem_truncate_indirect(struct shmem_inode_info *info, unsigned long index)
{
        swp_entry_t ***base;
        unsigned long baseidx, len, start;
        unsigned long max = info->next_index-1;         /* highest used page index */

        if (max < SHMEM_NR_DIRECT) {
                /* everything left lives in the direct array */
                info->next_index = index;
                return shmem_free_swp(info->i_direct + index,
                                      SHMEM_NR_DIRECT - index);
        }

        if (max < ENTRIES_PER_PAGE * ENTRIES_PER_PAGE/2 + SHMEM_NR_DIRECT) {
                /* topmost region is the singly-indirect area */
                max -= SHMEM_NR_DIRECT;
                base = (swp_entry_t ***) &info->i_indirect;
                baseidx = SHMEM_NR_DIRECT;
                len = max+1;
        } else {
                /* topmost region is one doubly-indirect page */
                max -= ENTRIES_PER_PAGE*ENTRIES_PER_PAGE/2+SHMEM_NR_DIRECT;
                if (max >= ENTRIES_PER_PAGE*ENTRIES_PER_PAGE*ENTRIES_PER_PAGE/2)
                        BUG();

                baseidx = max & ~(ENTRIES_PER_PAGE*ENTRIES_PER_PAGE-1);
                base = (swp_entry_t ***) info->i_indirect + ENTRIES_PER_PAGE/2 + baseidx/ENTRIES_PER_PAGE/ENTRIES_PER_PAGE ;
                len = max - baseidx + 1;
                baseidx += ENTRIES_PER_PAGE*ENTRIES_PER_PAGE/2+SHMEM_NR_DIRECT;
        }

        if (index > baseidx) {
                /* truncation point lies inside this region */
                info->next_index = index;
                start = index - baseidx;
        } else {
                /* whole region goes; later passes handle lower regions */
                info->next_index = baseidx;
                start = 0;
        }
        return shmem_truncate_direct(base, start, len);
}
00320
/*
 * i_op->truncate for shmem: release every page/swap slot beyond the
 * (already updated) inode->i_size.
 *
 * Takes info->sem against concurrent page-ins, then repeatedly peels
 * regions off the top via shmem_truncate_indirect() until next_index
 * reaches the new end, and finally rebalances the inode's block
 * charge.
 */
static void shmem_truncate (struct _fcb * inode)
{
        unsigned long index;
        unsigned long freed = 0;
        struct shmem_inode_info * info = SHMEM_I(inode);

        down(&info->sem);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        spin_lock (&info->lock);
        /* first page index that must no longer hold data */
        index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        while (index < info->next_index)
                freed += shmem_truncate_indirect(info, index);

        info->swapped -= freed;
        shmem_recalc_inode(inode);
        spin_unlock (&info->lock);
        up(&info->sem);
}
00340
/*
 * s_op->delete_inode: final disposal of a shmem inode.  Frees all data
 * (for regular files), takes the inode off the global shmem list, and
 * returns its slot to the superblock's inode quota.
 *
 * The i_op->truncate comparison distinguishes regular files (which are
 * on shmem_inodes and may hold data) from directories and special
 * inodes, which have neither.
 */
static void shmem_delete_inode(struct _fcb * inode)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

        inode->i_size = 0;
        if (inode->i_op->truncate == shmem_truncate){
                spin_lock (&shmem_ilock);
                list_del (&SHMEM_I(inode)->list);
                spin_unlock (&shmem_ilock);
                shmem_truncate (inode);
        }
        spin_lock (&sbinfo->stat_lock);
        sbinfo->free_inodes++;
        spin_unlock (&sbinfo->stat_lock);
        clear_inode(inode);
}
00357
00358 static int shmem_clear_swp (swp_entry_t entry, swp_entry_t *ptr, int size) {
00359 swp_entry_t *test;
00360
00361 for (test = ptr; test < ptr + size; test++) {
00362 if (test->val == entry.val) {
00363 #if 0
00364 swap_free (entry);
00365 #endif
00366 *test = (swp_entry_t) {0};
00367 return test - ptr;
00368 }
00369 }
00370 return -1;
00371 }
00372
/*
 * Search one inode's swap-entry tables for @entry; if present, clear
 * the slot and account the page as back in core (the swap-cache
 * re-attachment of @page is currently compiled out).
 *
 * Scans the direct array first, then every allocated indirect entry
 * page.  Returns 1 when the entry belonged to this inode, 0 otherwise.
 */
static int shmem_unuse_inode (struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
        swp_entry_t *ptr;
        unsigned long idx;
        int offset;

        idx = 0;
        spin_lock (&info->lock);
        offset = shmem_clear_swp (entry, info->i_direct, SHMEM_NR_DIRECT);
        if (offset >= 0)
                goto found;

        for (idx = SHMEM_NR_DIRECT; idx < info->next_index;
             idx += ENTRIES_PER_PAGE) {
                /* page argument 0: pure lookup, never allocates */
                ptr = shmem_swp_entry(info, idx, 0);
                if (IS_ERR(ptr))
                        continue;
                offset = shmem_clear_swp (entry, ptr, ENTRIES_PER_PAGE);
                if (offset >= 0)
                        goto found;
        }
        spin_unlock (&info->lock);
        return 0;
found:
#if 0
        delete_from_swap_cache(page);

        SetPageDirty(page);
        SetPageUptodate(page);
#endif
        info->swapped--;
        spin_unlock(&info->lock);
        return 1;
}
00407
00408
00409
00410
00411 void shmem_unuse(swp_entry_t entry, struct page *page)
00412 {
00413 struct list_head *p;
00414 struct shmem_inode_info * info;
00415
00416 spin_lock (&shmem_ilock);
00417 list_for_each(p, &shmem_inodes) {
00418 info = list_entry(p, struct shmem_inode_info, list);
00419
00420 if (shmem_unuse_inode(info, entry, page))
00421 break;
00422 }
00423 spin_unlock (&shmem_ilock);
00424 }
00425
00426
00427
00428
00429
00430
00431
00432
/*
 * a_ops->writepage for shmem: move a page out of the page cache into
 * the swap cache, recording the swap slot in the inode's entry table.
 *
 * NOTE(review): large parts of this routine are compiled out with
 * #if 0 in this port.  As the code stands, `mapping` and `index` are
 * read below without ever being assigned (their initialisation sits
 * inside an #if 0 block), and `swap` is stored into *entry while
 * equally uninitialised — undefined behaviour if this function is
 * ever invoked.  Presumably the writepage path is not wired up yet;
 * confirm before enabling it via shmem_aops.
 */
static int shmem_writepage(struct page * page)
{
        struct shmem_inode_info *info;
        swp_entry_t *entry, swap;
        struct address_space *mapping;
        unsigned long index;
        struct _fcb *inode;

#if 0
        if (!PageLocked(page))
                BUG();
        if (!PageLaunder(page))
                return fail_writepage(page);
#endif

#if 0
        mapping = page->mapping;
        index = page->index;
#endif
        inode = mapping->host;          /* NOTE(review): mapping uninitialised here */
        info = SHMEM_I(inode);
#if 0
        if (info->locked)
                return fail_writepage(page);
#endif
getswap:
#if 0
        swap = get_swap_page();
        if (!swap.val)
                return fail_writepage(page);
#endif

        spin_lock(&info->lock);
        entry = shmem_swp_entry(info, index, 0);        /* NOTE(review): index uninitialised here */
        if (IS_ERR(entry))
                BUG();
        shmem_recalc_inode(inode);
        if (entry->val)
                BUG();

        /* drop the page-cache reference to the page */
        page_cache_release(page);

        /* add it to the swap cache (currently compiled out) */
#if 0
        if (add_to_swap_cache(page, swap) != 0) {
                /*
                 * Raced with read_swap_cache_async: give the slot
                 * back and try for another.
                 */
                spin_unlock(&info->lock);
                swap_free(swap);
                goto getswap;
        }
#endif

        *entry = swap;          /* NOTE(review): swap uninitialised here */
        info->swapped++;
        spin_unlock(&info->lock);
#if 0
        SetPageUptodate(page);
        set_page_dirty(page);
        UnlockPage(page);
#endif
        return 0;
}
00501
00502
00503
00504
00505
00506
00507
00508
00509
00510
00511
00512
/*
 * Return the page at @idx for @inode, allocating and accounting a
 * fresh zeroed page when the file holds neither a resident page nor a
 * swap entry there.  (In the full implementation the page is returned
 * locked; the locking calls are compiled out in this port.)
 *
 * Caller holds info->sem; info->lock is taken internally around the
 * entry-table and accounting updates.
 *
 * NOTE(review): the page-cache and swap-cache lookups are stubbed out
 * here ("page = 0;"), so control always reaches either the swap-entry
 * branch or the fresh-allocation branch; the wait_retry and swap
 * read-in paths are currently unreachable or compiled out.
 *
 * Returns the page, or ERR_PTR(-EFBIG/-ENOMEM/-ENOSPC) on failure.
 */
static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct _fcb * inode, unsigned long idx)
{
        struct address_space * mapping = inode->i_mapping;
        struct shmem_sb_info *sbinfo;
        struct page * page;
        swp_entry_t *entry;

repeat:
        /* page-cache lookup stubbed out in this port */
        page = 0;
        if (page)
                return page;

        entry = shmem_alloc_entry (info, idx);
        if (IS_ERR(entry))
                return (void *)entry;

        spin_lock (&info->lock);

        /* re-check under the lock (lookup stubbed out) */
        page = 0;
        if (page) {
#if 0
                if (TryLockPage(page))
                        goto wait_retry;
#endif
                spin_unlock (&info->lock);
                return page;
        }

        shmem_recalc_inode(inode);
        if (entry->val) {
                /* page is swapped out: bring it back in */
                unsigned long flags;

                /* swap-cache lookup stubbed out in this port */
                page = 0;
                if (!page) {
                        swp_entry_t swap = *entry;
                        spin_unlock (&info->lock);
#if 0
                        swapin_readahead(*entry);
                        page = read_swap_cache_async(*entry);
#endif
                        if (!page) {
                                /* entry changed under us: retry; else OOM */
                                if (entry->val != swap.val)
                                        goto repeat;
                                return ERR_PTR(-ENOMEM);
                        }

#if 0
                        if (!Page_Uptodate(page) && entry->val == swap.val) {
                                page_cache_release(page);
                                return ERR_PTR(-EIO);
                        }
#endif

                        /* drop our reference and retry the lookup */
                        page_cache_release(page);
                        goto repeat;
                }

                /* migrate the page from swap cache to page cache */
#if 0
                if (TryLockPage(page))
                        goto wait_retry;

                swap_free(*entry);
                *entry = (swp_entry_t) {0};
                delete_from_swap_cache(page);
                flags = page->pfn_l_page_state & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_referenced) | (1 << PG_arch_1));
                page->pfn_l_page_state = flags | (1 << PG_dirty);
#endif

                info->swapped--;
                spin_unlock (&info->lock);
        } else {
                /* nothing anywhere yet: charge a block, allocate fresh */
                sbinfo = SHMEM_SB(inode->i_sb);
                spin_unlock (&info->lock);
                spin_lock (&sbinfo->stat_lock);
                if (sbinfo->free_blocks == 0)
                        goto no_space;
                sbinfo->free_blocks--;
                spin_unlock (&sbinfo->stat_lock);

                page = page_cache_alloc(mapping);
                /* NOTE(review): the block charged above is not given
                 * back if this allocation fails — confirm intent */
                if (!page)
                        return ERR_PTR(-ENOMEM);
                clear_highpage(page);
                inode->i_blocks += BLOCKS_PER_PAGE;

        }

#if 0
        SetPageUptodate(page);
#endif
        return page;
no_space:
        spin_unlock (&sbinfo->stat_lock);
        return ERR_PTR(-ENOSPC);

wait_retry:
        spin_unlock (&info->lock);
        /* NOTE(review): only reachable from the #if 0 TryLockPage paths */
        page_cache_release(page);
        goto repeat;
}
00631
/*
 * Fetch (faulting in or allocating as needed) the page at @idx.
 *
 * Faults beyond i_size are refused with -EFAULT.  On success 0 is
 * returned and *ptr holds the page; on failure the negative error is
 * returned and *ptr carries the matching nopage sentinel
 * (NOPAGE_OOM for -ENOMEM, NOPAGE_SIGBUS otherwise).
 */
static int shmem_getpage(struct _fcb * inode, unsigned long idx, struct page **ptr)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        int error;

        down (&info->sem);
        *ptr = ERR_PTR(-EFAULT);
        if (inode->i_size <= (loff_t) idx * PAGE_CACHE_SIZE)
                goto failed;

        *ptr = shmem_getpage_locked(info, inode, idx);
        if (IS_ERR (*ptr))
                goto failed;

#if 0
        UnlockPage(*ptr);
#endif
        up (&info->sem);
        return 0;
failed:
        up (&info->sem);
        error = PTR_ERR(*ptr);
        *ptr = NOPAGE_SIGBUS;
        if (error == -ENOMEM)
                *ptr = NOPAGE_OOM;
        return error;
}
00659
/*
 * vm_ops->nopage handler for shmem mappings.
 *
 * NOTE(review): the real fault path (translate the fault address into
 * a file index and fetch via shmem_getpage()) is compiled out in this
 * port; the function currently always returns 0.
 */
struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused)
{
#if 0
        struct page * page;
        unsigned int idx;
        struct _fcb * inode = vma->vm_file->f_dentry->d_inode;

        idx = (address - vma->vm_start) >> PAGE_CACHE_SHIFT;
        idx += vma->vm_pgoff;

        if (shmem_getpage(inode, idx, &page))
                return page;

        flush_page_to_ram(page);
        return(page);
#endif
        return 0;
}
00678
00679 void shmem_lock(struct file * file, int lock)
00680 {
00681 struct _fcb * inode = file->f_dentry->d_inode;
00682 struct shmem_inode_info * info = SHMEM_I(inode);
00683
00684 down(&info->sem);
00685 info->locked = lock;
00686 up(&info->sem);
00687 }
00688
/*
 * f_op->mmap for shmem files.
 *
 * NOTE(review): the body that validates the inode and installs
 * shmem_vm_ops is compiled out, so mmap currently succeeds without
 * attaching any vm_ops to the vma.
 */
static int shmem_mmap(struct file * file, struct vm_area_struct * vma)
{
#if 0
        struct vm_operations_struct * ops;
        struct _fcb *inode = file->f_dentry->d_inode;

        ops = &shmem_vm_ops;
        if (!inode->i_sb || !S_ISREG(inode->i_mode))
                return -EACCES;
        UPDATE_ATIME(inode);
        vma->vm_ops = ops;
#endif
        return 0;
}
00703
00704 struct _fcb *shmem_get_inode(struct super_block *sb, int mode, int dev)
00705 {
00706 struct _fcb * inode;
00707 struct shmem_inode_info *info;
00708 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
00709
00710 spin_lock (&sbinfo->stat_lock);
00711 if (!sbinfo->free_inodes) {
00712 spin_unlock (&sbinfo->stat_lock);
00713 return NULL;
00714 }
00715 sbinfo->free_inodes--;
00716 spin_unlock (&sbinfo->stat_lock);
00717
00718 inode = new_inode(sb);
00719 if (inode) {
00720 inode->i_mode = mode;
00721 inode->i_uid = current->fsuid;
00722 inode->i_gid = current->fsgid;
00723 inode->i_blksize = PAGE_CACHE_SIZE;
00724 inode->i_blocks = 0;
00725 inode->i_rdev = NODEV;
00726 inode->i_mapping->a_ops = &shmem_aops;
00727 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
00728 info = SHMEM_I(inode);
00729 info->inode = inode;
00730 spin_lock_init (&info->lock);
00731 sema_init (&info->sem, 1);
00732 switch (mode & S_IFMT) {
00733 default:
00734 init_special_inode(inode, mode, dev);
00735 break;
00736 case S_IFREG:
00737 inode->i_op = &shmem_inode_operations;
00738 inode->i_fop = &shmem_file_operations;
00739 spin_lock (&shmem_ilock);
00740 list_add (&SHMEM_I(inode)->list, &shmem_inodes);
00741 spin_unlock (&shmem_ilock);
00742 break;
00743 case S_IFDIR:
00744 inode->i_nlink++;
00745 inode->i_op = &shmem_dir_inode_operations;
00746 inode->i_fop = &shmem_dir_operations;
00747 break;
00748 case S_IFLNK:
00749 break;
00750 }
00751 }
00752 return inode;
00753 }
00754
00755 static int shmem_set_size(struct shmem_sb_info *info,
00756 unsigned long max_blocks, unsigned long max_inodes)
00757 {
00758 int error;
00759 unsigned long blocks, inodes;
00760
00761 spin_lock(&info->stat_lock);
00762 blocks = info->max_blocks - info->free_blocks;
00763 inodes = info->max_inodes - info->free_inodes;
00764 error = -EINVAL;
00765 if (max_blocks < blocks)
00766 goto out;
00767 if (max_inodes < inodes)
00768 goto out;
00769 error = 0;
00770 info->max_blocks = max_blocks;
00771 info->free_blocks = max_blocks - blocks;
00772 info->max_inodes = max_inodes;
00773 info->free_inodes = max_inodes - inodes;
00774 out:
00775 spin_unlock(&info->stat_lock);
00776 return error;
00777 }
00778
/*
 * Fill in a tmpfs superblock: default block and inode limits are half
 * of physical RAM (in pages), optionally overridden by mount options,
 * then create the world-writable, sticky root directory.
 *
 * Returns @sb on success, NULL on bad options or allocation failure.
 */
static struct super_block *shmem_read_super(struct super_block * sb, void * data, int silent)
{
        struct _fcb * inode;
        struct dentry * root;
        unsigned long blocks, inodes;
        int mode = S_IRWXUGO | S_ISVTX;         /* rwxrwxrwt, like /tmp */
        uid_t uid = current->fsuid;
        gid_t gid = current->fsgid;
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        struct sysinfo si;

        /* default limit: half of physical memory */
        si_meminfo(&si);
        blocks = inodes = si.totalram / 2;

#ifdef CONFIG_TMPFS
        if (shmem_parse_options (data, &mode, &uid, &gid, &blocks, &inodes))
                return NULL;
#endif

        spin_lock_init (&sbinfo->stat_lock);
        sbinfo->max_blocks = blocks;
        sbinfo->free_blocks = blocks;
        sbinfo->max_inodes = inodes;
        sbinfo->free_inodes = inodes;
        sb->s_maxbytes = (unsigned long long) SHMEM_MAX_BLOCKS << PAGE_CACHE_SHIFT;
        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = TMPFS_MAGIC;
        sb->s_op = &shmem_ops;
        inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
        if (!inode)
                return NULL;

        inode->i_uid = uid;
        inode->i_gid = gid;
        root = d_alloc_root(inode);
        if (!root) {
                iput(inode);
                return NULL;
        }
        sb->s_root = root;
        return sb;
}
00826
00827
00828
/* Address-space ops: only swap-out (writepage) is provided. */
static struct address_space_operations shmem_aops = {
        writepage: shmem_writepage,
};
00832
/* File ops for regular shmem files; read/write/fsync only with tmpfs. */
static struct file_operations shmem_file_operations = {
        mmap: shmem_mmap,
#ifdef CONFIG_TMPFS
        read: shmem_file_read,
        write: shmem_file_write,
        fsync: shmem_sync_file,
#endif
};
00841
/* Inode ops for regular files: truncate is the only hook needed. */
static struct inode_operations shmem_inode_operations = {
        truncate: shmem_truncate,
};
00845
/* Directory file ops: dcache-backed readdir. */
static struct file_operations shmem_dir_operations = {
        read: generic_read_dir,
        readdir: dcache_readdir,
#ifdef CONFIG_TMPFS
        fsync: shmem_sync_file,
#endif
};
00853
/* Directory inode ops: namespace operations exist only with CONFIG_TMPFS. */
static struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
        create: shmem_create,
        lookup: shmem_lookup,
        link: shmem_link,
        unlink: shmem_unlink,
        symlink: shmem_symlink,
        mkdir: shmem_mkdir,
        rmdir: shmem_rmdir,
        mknod: shmem_mknod,
        rename: shmem_rename,
#endif
};
00867
/* Superblock ops: force_delete makes the final iput() delete the inode. */
static struct super_operations shmem_ops = {
#ifdef CONFIG_TMPFS
        statfs: shmem_statfs,
        remount_fs: shmem_remount_fs,
#endif
        delete_inode: shmem_delete_inode,
        put_inode: force_delete,
};
00876
/* VM ops for shmem mappings: faults are served by shmem_nopage. */
static struct vm_operations_struct shmem_vm_ops = {
        nopage: shmem_nopage,
};
00880
#ifdef CONFIG_TMPFS
/* "shm" is kept as an alias for the traditional mount point */
static DECLARE_FSTYPE(shmem_fs_type, "shm", shmem_read_super, FS_LITTER);
static DECLARE_FSTYPE(tmpfs_fs_type, "tmpfs", shmem_read_super, FS_LITTER);
#else
/* without CONFIG_TMPFS the fs exists only for the internal mount */
static DECLARE_FSTYPE(tmpfs_fs_type, "tmpfs", shmem_read_super, FS_LITTER|FS_NOMOUNT);
#endif
/* internal mount used by shmem_file_setup()/shmem_zero_setup() */
static struct vfsmount *shm_mnt;
00889
/*
 * Module init: register tmpfs (and, with CONFIG_TMPFS, the "shm"
 * alias), create the internal kernel mount, and lift its size limits
 * so in-kernel users are never refused space.
 */
static int __init init_shmem_fs(void)
{
        int error;
        struct vfsmount * res;

        if ((error = register_filesystem(&tmpfs_fs_type))) {
                printk (KERN_ERR "Could not register tmpfs\n");
                return error;
        }
#ifdef CONFIG_TMPFS
        /* NOTE(review): tmpfs stays registered if this fails — confirm intent */
        if ((error = register_filesystem(&shmem_fs_type))) {
                printk (KERN_ERR "Could not register shm fs\n");
                return error;
        }
        devfs_mk_dir (NULL, "shm", NULL);
#endif
        res = kern_mount(&tmpfs_fs_type);
        if (IS_ERR (res)) {
                printk (KERN_ERR "could not kern_mount tmpfs\n");
                unregister_filesystem(&tmpfs_fs_type);
                return PTR_ERR(res);
        }
        shm_mnt = res;

        /* the internal instance is effectively unlimited */
        if ((error = shmem_set_size(SHMEM_SB(res->mnt_sb), ULONG_MAX, ULONG_MAX)))
                printk (KERN_ERR "could not set limits on internal tmpfs\n");

        return 0;
}
00920
/* Module unload: drop the internal mount and unregister the fs types. */
static void __exit exit_shmem_fs(void)
{
#ifdef CONFIG_TMPFS
        unregister_filesystem(&shmem_fs_type);
#endif
        unregister_filesystem(&tmpfs_fs_type);
        mntput(shm_mnt);
}
00929
00930 module_init(init_shmem_fs)
00931 module_exit(exit_shmem_fs)
00932
00933
00934
00935
00936
00937
00938
00939
/*
 * Create an unlinked tmpfs file of @size bytes on the internal mount.
 *
 * Gives callers (e.g. shmem_zero_setup()) an anonymous, swap-backed
 * file: the dentry is instantiated but i_nlink stays 0, so the inode
 * dies with the last reference.
 *
 * Returns the open struct file, or ERR_PTR(-EINVAL/-ENOMEM/-ENFILE/
 * -ENOSPC) on failure; errors unwind via goto cleanup labels.
 */
struct file *shmem_file_setup(char * name, loff_t size)
{
        int error;
        struct file *file;
        struct _fcb * inode;
        struct dentry *dentry, *root;
        struct qstr this;
        int vm_enough_memory(long pages);       /* local prototype */

        if (size > (unsigned long long) SHMEM_MAX_BLOCKS << PAGE_CACHE_SHIFT)
                return ERR_PTR(-EINVAL);

        if (!vm_enough_memory((size) >> PAGE_CACHE_SHIFT))
                return ERR_PTR(-ENOMEM);

        this.name = name;
        this.len = strlen(name);
        this.hash = 0;
        root = shm_mnt->mnt_root;
        dentry = d_alloc(root, &this);
        if (!dentry)
                return ERR_PTR(-ENOMEM);

        error = -ENFILE;
        file = get_empty_filp();
        if (!file)
                goto put_dentry;

        error = -ENOSPC;
        inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
                goto close_file;

        d_instantiate(dentry, inode);
        dentry->d_inode->i_size = size;
        shmem_truncate(inode);  /* nothing allocated yet; sets timestamps */
        file->f_vfsmnt = mntget(shm_mnt);
        file->f_dentry = dentry;
        file->f_op = &shmem_file_operations;
        file->f_mode = FMODE_WRITE | FMODE_READ;
        inode->i_nlink = 0;     /* the file is deliberately unlinked */
        return(file);

close_file:
        put_filp(file);
put_dentry:
        dput (dentry);
        return ERR_PTR(error);
}
00989
00990
00991
00992
00993
/*
 * Back an anonymous shared mapping with a tmpfs file ("dev/zero").
 *
 * NOTE(review): the code that would attach the file and shmem_vm_ops
 * to the vma is compiled out, so currently this only creates the file
 * (whose reference is then never attached anywhere) — confirm before
 * relying on this path.
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
        struct file *file;
        loff_t size = vma->rde_q_region_size;

        file = shmem_file_setup("dev/zero", size);
        if (IS_ERR(file))
                return PTR_ERR(file);

#if 0
        if (vma->vm_file)
                fput (vma->vm_file);
        vma->vm_file = file;
        vma->vm_ops = &shmem_vm_ops;
#endif
        return 0;
}
01011
01012 EXPORT_SYMBOL(shmem_file_setup);
01013