#include <linux/config.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/locks.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/sysrq.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/iobuf.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/completion.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include <fcbdef.h>
#include <pridef.h>
#include <iodef.h>
#include <misc.h>
#include <rvtdef.h>
#include <vcbdef.h>
#include <ucbdef.h>
#include <linux/ext2_fs.h>

#include <misc_routines.h>
#include "../../ext2/src/x2p.h"

#define EXT2_EF 30

#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
#define NR_RESERVED (10*MAX_BUF_PER_PAGE)
#define MAX_UNUSED_BUFFERS (NR_RESERVED+20)
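
/*
 * Worked example of the tunables above (illustrative, assuming the
 * common 4 kB PAGE_CACHE_SIZE): MAX_BUF_PER_PAGE = 4096/512 = 8,
 * NR_RESERVED = 10*8 = 80 and MAX_UNUSED_BUFFERS = 80+20 = 100.
 */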

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_inode_buffers)

#if 0
static unsigned int bh_hash_mask;
static unsigned int bh_hash_shift;
static struct buffer_head **hash_table;
static rwlock_t hash_table_lock = RW_LOCK_UNLOCKED;

static struct buffer_head *lru_list[NR_LIST];
static spinlock_t lru_list_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
static int nr_buffers_type[NR_LIST];
static unsigned long size_buffers_type[NR_LIST];

static struct buffer_head * unused_list;
static int nr_unused_buffer_heads;
static spinlock_t unused_list_lock = SPIN_LOCK_UNLOCKED;
static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
#endif

static int grow_buffers(kdev_t dev, unsigned long block, int size);
static void __refile_buffer(struct buffer_head *);

atomic_t buffermem_pages = ATOMIC_INIT(0);

#define N_PARAM 9

union bdflush_param {
	struct {
		int nfract;		/* percentage of buffer cache dirty before bdflush kicks in */
		int dummy1;
		int dummy2;
		int dummy3;
		int interval;		/* jiffies delay between kupdate flushes */
		int age_buffer;		/* time for a normal buffer to age before we flush it */
		int nfract_sync;	/* percentage of dirty buffers before writers throttle */
		int dummy4;
		int dummy5;
	} b_un;
	unsigned int data[N_PARAM];
} bdf_prm = {{40, 0, 0, 0, 5*HZ, 30*HZ, 60, 0, 0}};

/* Legal minimum and maximum values for each bdflush tunable. */
int bdflush_min[N_PARAM] = { 0, 10, 5, 25, 0, 1*HZ, 0, 0, 0};
int bdflush_max[N_PARAM] = {100,50000, 20000, 20000,10000*HZ, 6000*HZ, 100, 0, 0};

void fastcall unlock_buffer(struct buffer_head *bh)
{
	clear_bit(BH_Wait_IO, &bh->b_state);
	clear_bit(BH_launder, &bh->b_state);
	clear_bit(BH_Lock, &bh->b_state);
	smp_mb__after_clear_bit();
	if (waitqueue_active(&bh->b_wait))
		wake_up(&bh->b_wait);
}

/*
 * Wait on a buffer until it is unlocked.  The buffer is pinned with
 * get_bh() so it cannot disappear while we sleep on b_wait.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	get_bh(bh);
	add_wait_queue(&bh->b_wait, &wait);
	do {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!buffer_locked(bh))
			break;
		schedule();
	} while (buffer_locked(bh));
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&bh->b_wait, &wait);
	put_bh(bh);
}
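
/*
 * Note on ordering (an observation on the code above): the sleeper sets
 * TASK_UNINTERRUPTIBLE *before* re-testing buffer_locked(), and
 * unlock_buffer() issues smp_mb__after_clear_bit() before scanning the
 * waitqueue.  Either the sleeper sees BH_Lock clear and breaks out, or
 * the waker sees it on b_wait and wakes it; no wakeup can be lost.
 */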

/*
 * Default synchronous end-of-IO handler: mark the buffer up to date,
 * unlock it and drop the reference taken when the IO was submitted.
 */
void end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	mark_buffer_uptodate(bh, uptodate);
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Write out and wait upon all dirty data associated with a device.
 * The Linux buffer cache is bypassed on this port, so this is a stub;
 * the original two-pass write/wait logic is kept below, disabled.
 */
int sync_buffers(kdev_t dev, int wait)
{
	int err = 0;

#if 0
	write_unlocked_buffers(dev);
	if (wait) {
		err = wait_for_locked_buffers(dev, BUF_DIRTY, 0);
		write_unlocked_buffers(dev);
		err |= wait_for_locked_buffers(dev, BUF_LOCKED, 1);
	}
#endif
	return err;
}

int fsync_super(struct super_block *sb)
{
#if 0
	kdev_t dev = sb->s_dev;
	sync_buffers(dev, 0);

	lock_kernel();
	sync_inodes_sb(sb);
	DQUOT_SYNC(dev);
	lock_super(sb);
	if (sb->s_dirt && sb->s_op && sb->s_op->write_super)
		sb->s_op->write_super(sb);
	unlock_super(sb);
	unlock_kernel();

	return sync_buffers(dev, 1);
#else
	return 0;
#endif
}

int fsync_no_super(kdev_t dev)
{
	sync_buffers(dev, 0);
	return sync_buffers(dev, 1);
}

int fsync_dev(kdev_t dev)
{
#if 0
	sync_buffers(dev, 0);

	lock_kernel();
	sync_inodes(dev);
	DQUOT_SYNC(dev);
	sync_supers(dev);
	unlock_kernel();

	return sync_buffers(dev, 1);
#else
	return 0;
#endif
}

void sync_dev(kdev_t dev)
{
	fsync_dev(dev);
}

asmlinkage long sys_sync(void)
{
	fsync_dev(0);
	return 0;
}
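
/*
 * Call-chain overview (as wired up above): sys_sync() -> fsync_dev(0),
 * and fsync_super()/file_fsync() would normally push inodes, quota,
 * superblocks and buffers out in that order.  On this port every stage
 * ends in the stubbed sync_buffers(), which is harmless because writes
 * below go out synchronously through the QIO path (exe_qiow).
 */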

/*
 * Generic fsync for filesystems without their own: write the inode,
 * the superblock and the device's buffers.  Stubbed out here.
 */
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
#if 0
	struct _fcb * inode = dentry->d_inode;
	struct super_block * sb;
	kdev_t dev;
	int ret;

	lock_kernel();
	/* sync the inode to buffers */
	write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	lock_super(sb);
	if (sb->s_op && sb->s_op->write_super)
		sb->s_op->write_super(sb);
	unlock_super(sb);

	/* .. finally sync the buffers to disk */
	dev = FCB_DEV(inode);
	ret = sync_buffers(dev, 1);
	unlock_kernel();
	return ret;
#endif
	return 0;
}

asmlinkage long sys_fsync(unsigned int fd)
{
	struct file * file;
	struct dentry * dentry;
	struct _fcb * inode;
	int ret;

	ret = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	dentry = file->f_dentry;
	inode = dentry->d_inode;

	ret = -EINVAL;
	if (!file->f_op || !file->f_op->fsync)
		goto out_putf;

	/*
	 * The actual flush is a no-op on this port; the original
	 * fdatasync/fsync/fdatawait sequence is kept below, disabled.
	 * Report success rather than falling through with -EINVAL.
	 */
	ret = 0;
#if 0
	{
		int err;
		down(&inode->i_sem);
		ret = filemap_fdatasync(inode->i_mapping);
		err = file->f_op->fsync(file, dentry, 0);
		if (err && !ret)
			ret = err;
		err = filemap_fdatawait(inode->i_mapping);
		if (err && !ret)
			ret = err;
		up(&inode->i_sem);
	}
#endif

out_putf:
	fput(file);
out:
	return ret;
}

asmlinkage long sys_fdatasync(unsigned int fd)
{
	struct file * file;
	struct dentry * dentry;
	struct _fcb * inode;
	int ret;

	ret = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	dentry = file->f_dentry;
	inode = dentry->d_inode;

	ret = -EINVAL;
	if (!file->f_op || !file->f_op->fsync)
		goto out_putf;

	/* as in sys_fsync(): the flush path is stubbed, report success */
	ret = 0;
#if 0
	{
		int err;
		down(&inode->i_sem);
		ret = filemap_fdatasync(inode->i_mapping);
		err = file->f_op->fsync(file, dentry, 1);
		if (err && !ret)
			ret = err;
		err = filemap_fdatawait(inode->i_mapping);
		if (err && !ret)
			ret = err;
		up(&inode->i_sem);
	}
#endif

out_putf:
	fput(file);
out:
	return ret;
}

void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_list = BUF_CLEAN;
	bh->b_end_io = handler;
	bh->b_private = private;
}

/*
 * On this port getblk() does not consult a buffer cache at all: it
 * simply allocates a fresh buffer_head plus a data block of the
 * requested size.  The caller releases it with brelse(), which frees it.
 */
struct buffer_head * getblk(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;

	bh = kmalloc(sizeof(struct buffer_head), GFP_KERNEL);
	if (!bh)
		return NULL;
	init_buffer(bh, NULL, NULL);
	init_waitqueue_head(&bh->b_wait);
	get_bh(bh);

	bh->b_dev = dev;
	bh->b_data = kmalloc(size, GFP_KERNEL);
	if (!bh->b_data) {
		kfree(bh);
		return NULL;
	}
	bh->b_blocknr = (long)block;
	bh->b_size = (long)size;
	return bh;
}
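
/*
 * Minimal usage sketch (illustrative only, not called anywhere): a
 * getblk() buffer from this allocator carries no cached data, so the
 * caller fills b_data itself and drops the buffer with brelse(),
 * which kfree()s the head once b_count reaches zero.
 */
#if 0
static void getblk_example(kdev_t dev)
{
	struct buffer_head *bh = getblk(dev, 0, 1024);
	if (!bh)
		return;
	memset(bh->b_data, 0, bh->b_size);
	/* ... fill in the new block contents ... */
	brelse(bh);
}
#endif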

/*
 * Balance dirty buffers by writing some out.  With no buffer cache on
 * this port there is nothing to balance, so this is effectively a stub.
 */
void balance_dirty(void)
{
	int state = 0;

	if (state < 0)
		return;

#if 0
	spin_lock(&lru_list_lock);
	write_some_buffers(NODEV);
#endif

#if 0
	if (state > 0) {
		wait_for_some_buffers(NODEV);
		wakeup_bdflush();
	}
#endif
}

inline void fastcall __mark_dirty(struct buffer_head *bh)
{
	bh->b_flushtime = jiffies + bdf_prm.b_un.age_buffer;
}

void fastcall __mark_buffer_dirty(struct buffer_head *bh)
{
	if (!atomic_set_buffer_dirty(bh))
		__mark_dirty(bh);
}

void fastcall mark_buffer_dirty(struct buffer_head *bh)
{
	if (!atomic_set_buffer_dirty(bh)) {
		__mark_dirty(bh);
		balance_dirty();
	}
}

void set_buffer_flushtime(struct buffer_head *bh)
{
	bh->b_flushtime = jiffies + bdf_prm.b_un.age_buffer;
}
EXPORT_SYMBOL(set_buffer_flushtime);

/*
 * Drop a reference.  Since buffer heads are individually kmalloc()ed
 * on this port, the head itself is freed when the last reference goes.
 * Note that b_data is owned by the caller and is not freed here.
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		if (atomic_read(&buf->b_count) == 0)
			kfree(buf);
		return;
	}
	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}

/*
 * bforget() is like brelse(), except it would also discard any
 * potentially dirty data.  With no cache there is nothing to discard.
 */
void __bforget(struct buffer_head * buf)
{
	__brelse(buf);
}

/*
 * bread() - allocate a buffer and read a block into it synchronously.
 * The read goes through the VMS-style QIO path (exe_qiow) rather than
 * ll_rw_block(), since there is no buffer cache to hit.
 */
struct buffer_head * bread(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;
	struct _iosb iosb;
	int sts;

	bh = kmalloc(sizeof(struct buffer_head), GFP_KERNEL);
	if (!bh)
		return NULL;
	init_buffer(bh, NULL, NULL);
	init_waitqueue_head(&bh->b_wait);
	get_bh(bh);

	bh->b_dev = dev;
	bh->b_data = kmalloc(size, GFP_KERNEL);
	if (!bh->b_data) {
		kfree(bh);
		return NULL;
	}
	bh->b_size = (long)size;
	bh->b_blocknr = (long)block;

	/* XXX: sts/iosb are not checked; the buffer is marked up to
	   date unconditionally */
	sts = exe_qiow(EXT2_EF, dev2chan(dev), IO$_READPBLK, &iosb, 0, 0,
		       bh->b_data, size, block, MINOR(dev) & 31, 0, 0);

	set_bit(BH_Uptodate, &bh->b_state);
	return bh;
}
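
/*
 * Minimal usage sketch (illustrative only): read block 1 of a device,
 * inspect it, release it.  As with getblk(), brelse() frees the head.
 */
#if 0
static void bread_example(kdev_t dev)
{
	struct buffer_head *bh = bread(dev, 1, 1024);
	if (!bh)
		return;
	printk("first byte of block 1: %x\n", bh->b_data[0] & 0xff);
	brelse(bh);
}
#endif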

/*
 * With no buffer heads attached to pages there is never anything to
 * release; report failure so the caller leaves the page alone.
 */
int try_to_release_page(struct page * page, int gfp_mask)
{
	return 0;
}

/*
 * Likewise there is no per-page buffer list to invalidate; report
 * success so truncation can proceed.
 */
int discard_bh_page(struct page *page, unsigned long offset, int drop_pagecache)
{
	return 1;
}

/*
 * Write one whole page of a file to disk.  Each file-system block in
 * the page is mapped from its virtual block number (VBN) to a logical
 * block via e2_map_vbn(), allocated with ext2_get_block() if it is not
 * mapped yet, and then written synchronously with a QIO.
 */
static int __block_write_full_page2(struct _fcb *inode, struct page *page, unsigned long pageno)
{
	struct _fcb * fcb = e2_search_fcb(inode);
	unsigned long iblock;
	int err;
	int sts;
	struct _iosb iosb;
	int turns = 0;
	signed int blocknr;
	unsigned long blocksize;
	struct _vcb * vcb = exttwo_get_current_vcb();
	struct ext2_super_block * sb = vcb->vcb_l_cache;
	int i_blkbits = EXT2_BLOCK_SIZE_BITS(sb);

	blocksize = 1 << i_blkbits;
	iblock = pageno << (PAGE_CACHE_SHIFT - i_blkbits);

	do {
		struct _ucb * ucb;

		/* map the VBN; -1 means not yet allocated */
		if (fcb)
			blocknr = e2_map_vbn(fcb, iblock);
		else
			blocknr = iblock;
		if (blocknr == -1) {
			err = ext2_get_block(vcb, inode, iblock, &blocknr, 1, fcb);
			if (err)
				goto out;
		}
		/* pick the right unit control block on a volume set */
		if (vcb->vcb_l_rvn) {
			struct _rvt * rvt = vcb->vcb_l_rvt;
			struct _ucb ** ucblst = rvt->rvt_l_ucblst;
			ucb = ucblst[fcb->fcb_b_fid_rvn - 1];
		} else
			ucb = vcb->vcb_l_rvt;
		sts = exe_qiow(EXT2_EF, (unsigned short)x2p->io_channel, IO$_WRITEPBLK, &iosb, 0, 0,
			       page_address(page) + turns*blocksize, blocksize,
			       blocknr*vms_block_factor(i_blkbits), ucb->ucb_w_fill_0, 0, 0);

		turns++;
		iblock++;
	} while (turns < (PAGE_SIZE/blocksize));

#if 0
	SetPageUptodate(page);
#endif
	return 0;

out:
#if 0
	ClearPageUptodate(page);
#endif
	return err;
}
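
/*
 * The volume-set lookup above is repeated in every read/write helper
 * in this file.  A possible factoring (hypothetical helper, not used
 * by the surrounding code) would be:
 */
#if 0
static struct _ucb * e2_fcb_ucb(struct _vcb * vcb, struct _fcb * fcb)
{
	/* on a multi-volume set, index the relative-volume table by the
	   file's RVN; otherwise the VCB field doubles as the unit */
	if (vcb->vcb_l_rvn) {
		struct _rvt * rvt = vcb->vcb_l_rvt;
		struct _ucb ** ucblst = rvt->rvt_l_ucblst;
		return ucblst[fcb->fcb_b_fid_rvn - 1];
	}
	return vcb->vcb_l_rvt;
}
#endif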

/*
 * Prepare a page for a partial write: any block the write only
 * partially covers is read in first (after zeroing the edges), so the
 * subsequent commit can write whole blocks back.
 */
static int __block_prepare_write(struct _fcb *inode, struct page *page,
				 unsigned from, unsigned to, unsigned long pageno)
{
	struct _fcb * fcb = e2_search_fcb(inode);
	unsigned block_start, block_end;
	unsigned long block;
	int err = 0;
	unsigned blocksize;
	char *kaddr = kmap(page);
	int turns = 0;
	int sts;
	struct _iosb iosb;
	int blocknr;

	struct _vcb * vcb = exttwo_get_current_vcb();
	struct ext2_super_block * sb = vcb->vcb_l_cache;
	int i_blkbits = EXT2_BLOCK_SIZE_BITS(sb);
	blocksize = 1 << i_blkbits;

	block = pageno << (PAGE_CACHE_SHIFT - i_blkbits);

	for (block_start = 0; turns < (PAGE_SIZE/blocksize);
	     block++, block_start = block_end, turns++) {

		block_end = block_start + blocksize;
		if (block_end <= from)
			continue;
		if (block_start >= to)
			break;

		if (fcb)
			blocknr = e2_map_vbn(fcb, block);
		else
			blocknr = block;
		if (blocknr == -1) {
			err = ext2_get_block(vcb, inode, block, &blocknr, 1, fcb);
			if (err)
				goto out;
		}
		if (block_end > to)
			memset(kaddr+to, 0, block_end-to);
		if (block_start < from)
			memset(kaddr+block_start, 0, from-block_start);
		if (block_end > to || block_start < from)
			flush_dcache_page(page);
		/* block only partially covered by the write: read it in */
		if (block_start < from || block_end > to) {
			struct _ucb * ucb;
			if (vcb->vcb_l_rvn) {
				struct _rvt * rvt = vcb->vcb_l_rvt;
				struct _ucb ** ucblst = rvt->rvt_l_ucblst;
				ucb = ucblst[fcb->fcb_b_fid_rvn - 1];
			} else
				ucb = vcb->vcb_l_rvt;
			sts = exe_qiow(EXT2_EF, (unsigned short)x2p->io_channel, IO$_READPBLK, &iosb, 0, 0,
				       kaddr + turns*blocksize, blocksize,
				       blocknr*vms_block_factor(i_blkbits), ucb->ucb_w_fill_0, 0, 0);
		}
	}
	return 0;
out:
	return err;
}

/*
 * Write back the blocks of the page that fall inside [from, to).
 * Blocks wholly outside the range are skipped, leaving the page only
 * partially up to date.
 */
static int __block_commit_write(struct _fcb *inode, struct page *page,
				unsigned from, unsigned to, unsigned long pageno)
{
	struct _fcb * fcb = e2_search_fcb(inode);
	unsigned block_start, block_end;
	int partial = 0;
	unsigned blocksize;
	int turns = 0;
	int sts;
	struct _iosb iosb;
	signed long blocknr, block;

	struct _vcb * vcb = exttwo_get_current_vcb();
	struct ext2_super_block * sb = vcb->vcb_l_cache;
	int i_blkbits = EXT2_BLOCK_SIZE_BITS(sb);
	blocksize = 1 << i_blkbits;

	block = pageno << (PAGE_CACHE_SHIFT - i_blkbits);

	for (block_start = 0;
	     turns < (PAGE_SIZE/blocksize);
	     block_start = block_end, turns++, block++) {
		blocknr = e2_map_vbn(fcb, block);
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			partial = 1;
		} else {
			struct _ucb * ucb;
			if (vcb->vcb_l_rvn) {
				struct _rvt * rvt = vcb->vcb_l_rvt;
				struct _ucb ** ucblst = rvt->rvt_l_ucblst;
				ucb = ucblst[fcb->fcb_b_fid_rvn - 1];
			} else
				ucb = vcb->vcb_l_rvt;
			sts = exe_qiow(EXT2_EF, (unsigned short)x2p->io_channel, IO$_WRITEPBLK, &iosb, 0, 0,
				       page_address(page) + turns*blocksize, blocksize,
				       blocknr*vms_block_factor(i_blkbits), ucb->ucb_w_fill_0, 0, 0);
		}
	}

#if 0
	if (!partial)
		SetPageUptodate(page);
#endif
	return 0;
}

/*
 * Read one whole page of a file.  Blocks beyond the end of file are
 * skipped; mapped blocks are read synchronously with a QIO.  Note that
 * a hole inside the file is not zero-filled here; the original
 * zero-fill code is kept below, disabled.
 */
int block_read_full_page2(struct _fcb *inode, struct page *page, unsigned long pageno)
{
	struct _fcb * fcb = e2_search_fcb(inode);
	unsigned long iblock, lblock;
	unsigned int blocksize, blocks;
	int nr, i;
	int sts;
	struct _iosb iosb;
	int turns;
	unsigned long blocknr;

	struct _vcb * vcb = exttwo_get_current_vcb();
	struct ext2_super_block * sb = vcb->vcb_l_cache;
	int i_blkbits = EXT2_BLOCK_SIZE_BITS(sb);
	blocksize = 1 << i_blkbits;

	blocks = PAGE_CACHE_SIZE >> i_blkbits;
	iblock = pageno << (PAGE_CACHE_SHIFT - i_blkbits);
	lblock = (inode->fcb_l_filesize + blocksize - 1) >> i_blkbits;

	nr = 0;
	i = 0;
	turns = 0;

	do {
		struct _ucb * ucb;

		if (iblock < lblock) {
			if (fcb)
				blocknr = e2_map_vbn(fcb, iblock);
			else
				blocknr = iblock;
		} else {
			continue;	/* past EOF: leave the rest of the page alone */
		}
#if 0
		/* hole: deliver a zeroed block instead of reading */
		memset(kmap(page) + i*blocksize, 0, blocksize);
		flush_dcache_page(page);
		kunmap(page);
		continue;
#endif

		nr++;

		if (vcb->vcb_l_rvn) {
			struct _rvt * rvt = vcb->vcb_l_rvt;
			struct _ucb ** ucblst = rvt->rvt_l_ucblst;
			ucb = ucblst[fcb->fcb_b_fid_rvn - 1];
		} else
			ucb = vcb->vcb_l_rvt;
		sts = exe_qiow(EXT2_EF, (unsigned short)x2p->io_channel, IO$_READPBLK, &iosb, 0, 0,
			       page_address(page) + i*blocksize, blocksize,
			       blocknr*vms_block_factor(i_blkbits), ucb->ucb_w_fill_0, 0, 0);

	} while (i++, iblock++, turns++, turns < (PAGE_SIZE/blocksize));

#if 0
	SetPageUptodate(page);
#endif

	return 0;
}

/*
 * Same as block_read_full_page2() but takes the mapping FCB directly
 * and bounds the read by its end-of-file block rather than byte size.
 */
int block_read_full_page3(struct _fcb * fcb, struct page *page, unsigned long pageno)
{
	unsigned long iblock, lblock;
	unsigned int blocksize, blocks;
	int nr, i;
	int sts;
	struct _iosb iosb;
	int turns;
	unsigned long blocknr;

	struct _vcb * vcb = exttwo_get_current_vcb();
	struct ext2_super_block * sb = vcb->vcb_l_cache;
	int i_blkbits = EXT2_BLOCK_SIZE_BITS(sb);
	blocksize = 1 << i_blkbits;

	blocks = PAGE_CACHE_SIZE >> i_blkbits;
	iblock = pageno << (PAGE_CACHE_SHIFT - i_blkbits);
#if 0
	lblock = (inode->fcb_l_filesize + blocksize - 1) >> i_blkbits;
#else
	lblock = fcb->fcb_l_efblk;
#endif

	nr = 0;
	i = 0;
	turns = 0;

	do {
		struct _ucb * ucb;

		if (iblock < lblock) {
			if (fcb) {
				blocknr = e2_map_vbn(fcb, iblock);
				if ((blocknr + 1) == 0) {
					/* unmapped VBN inside the file: skip it */
					printk("ebl %p %lx %lx\n", fcb, iblock, blocknr);
					continue;
				}
			} else
				blocknr = iblock;
		} else {
			continue;
		}

#if 0
		/* hole: deliver a zeroed block instead of reading */
		memset(kmap(page) + i*blocksize, 0, blocksize);
		flush_dcache_page(page);
		kunmap(page);
		continue;
#endif

		nr++;

		if (vcb->vcb_l_rvn) {
			struct _rvt * rvt = vcb->vcb_l_rvt;
			struct _ucb ** ucblst = rvt->rvt_l_ucblst;
			ucb = ucblst[fcb->fcb_b_fid_rvn - 1];
		} else
			ucb = vcb->vcb_l_rvt;
		sts = exe_qiow(EXT2_EF, (unsigned short)x2p->io_channel, IO$_READPBLK, &iosb, 0, 0,
			       page_address(page) + i*blocksize, blocksize,
			       blocknr*vms_block_factor(i_blkbits), ucb->ucb_w_fill_0, 0, 0);

	} while (i++, iblock++, turns++, turns < (PAGE_SIZE/blocksize));

#if 0
	SetPageUptodate(page);
#endif

	return 0;
}

/*
 * Maximum file size as constrained by the ext2 indirect-block scheme
 * and by the 2 TB limit of 32-bit 512-byte sector numbers.
 */
static loff_t ext2_max_size(int bits)
{
	loff_t res = EXT2_NDIR_BLOCKS;
	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > (512LL << 32) - (1 << bits))
		res = (512LL << 32) - (1 << bits);
	return res;
}
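
/*
 * Worked example: with 1 kB blocks (bits = 10) a block holds 256
 * 4-byte block numbers, so the tree addresses 12 direct + 256 single +
 * 256^2 double + 256^3 triple-indirect blocks, about 16 GB, well under
 * the 512*2^32 byte (2 TB) sector-number cap.  With 4 kB blocks the
 * tree bound exceeds the cap, so the 2 TB limit applies instead.
 */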

/*
 * Extend a file to the given size by writing a zero-length span at the
 * new end; prepare/commit does the block allocation and zero-fill.
 */
int generic_cont_expand(struct _fcb *inode, loff_t size)
{
	struct page *page;
	unsigned long index, offset, limit;
	int err;
	struct _vcb * vcb;
	struct ext2_super_block * sb;

	err = -EFBIG;
	limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
		send_sig(SIGXFSZ, current, 0);
		goto out;
	}
	vcb = exttwo_get_current_vcb();
	sb = vcb->vcb_l_cache;
	if (size > ext2_max_size(EXT2_BLOCK_SIZE_BITS(sb)))
		goto out;

	offset = (size & (PAGE_CACHE_SIZE-1));

	/* if the new size sits exactly on a block boundary, bump the
	   offset by one byte so prepare/commit does not skip the block
	   entirely */
	if ((offset & (EXT2_BLOCK_SIZE(sb) - 1)) == 0) {
		offset++;
	}
	index = size >> PAGE_CACHE_SHIFT;
	err = -ENOMEM;
	page = alloc_pages(GFP_KERNEL, 0);
	if (!page)
		goto out;
	err = block_prepare_write2(inode, page, offset, offset, index);
	if (!err) {
		err = block_commit_write2(inode, page, offset, offset, index);
	}
#if 0
	UnlockPage(page);
#endif
	page_cache_release(page);
	if (err > 0)
		err = 0;
out:
	return err;
}

/*
 * Stubbed out on this port.  The original page-cache implementation is
 * kept below for reference; note that it still refers to names (the
 * get_block-style __block_prepare_write, pageno) that no longer match
 * the helpers above, so it would need adapting before re-enabling.
 */
int cont_prepare_write(struct page *page, unsigned offset, unsigned to, get_block_t *get_block, unsigned long *bytes)
{
#if 0
	struct address_space *mapping = page->mapping;
	struct _fcb *inode = mapping->host;
	struct page *new_page;
	unsigned long pgpos;
	long status;
	unsigned zerofrom;
	struct _vcb * vcb = exttwo_get_current_vcb();
	struct ext2_super_block * sb = vcb->vcb_l_cache;
	int i_blkbits = EXT2_BLOCK_SIZE_BITS(sb);
	unsigned blocksize = 1 << i_blkbits;
	char *kaddr;

	while (page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
		status = -ENOMEM;
		new_page = alloc_pages(GFP_KERNEL, 0);
		if (!new_page)
			goto out;

		if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
#if 0
			UnlockPage(new_page);
#endif
			page_cache_release(new_page);
			continue;
		}
		zerofrom = *bytes & ~PAGE_CACHE_MASK;
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		status = __block_prepare_write(inode, new_page, zerofrom,
						PAGE_CACHE_SIZE, get_block);
		if (status)
			goto out_unmap;
		kaddr = page_address(new_page);
		memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
		flush_dcache_page(new_page);
		__block_commit_write(inode, new_page, zerofrom, PAGE_CACHE_SIZE, pageno);
		kunmap(new_page);
#if 0
		UnlockPage(new_page);
#endif
		page_cache_release(new_page);
	}

	if (page->index < pgpos) {
		/* completely inside the area */
		zerofrom = offset;
	} else {
		/* page covers the boundary, find the boundary offset */
		zerofrom = *bytes & ~PAGE_CACHE_MASK;

		/* if we will expand the thing, the last block will be filled */
		if (to > zerofrom && (zerofrom & (blocksize-1))) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}

		/* starting below the boundary? Nothing to zero out */
		if (offset <= zerofrom)
			zerofrom = offset;
	}
	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
	if (status)
		goto out1;
	kaddr = page_address(page);
	if (zerofrom < offset) {
		memset(kaddr+zerofrom, 0, offset-zerofrom);
		flush_dcache_page(page);
		__block_commit_write(inode, page, zerofrom, offset);
	}
	return 0;
out1:
#if 0
	ClearPageUptodate(page);
#endif
	kunmap(page);
	return status;

out_unmap:
#if 0
	ClearPageUptodate(new_page);
#endif
	kunmap(new_page);
#if 0
	UnlockPage(new_page);
#endif
	page_cache_release(new_page);
out:
	return status;
#else
	return 0;
#endif
}

int block_prepare_write2(struct _fcb *inode, struct page *page, unsigned from, unsigned to, unsigned long pageno)
{
	int err = __block_prepare_write(inode, page, from, to, pageno);
	if (err) {
#if 0
		ClearPageUptodate(page);
#endif
		kunmap(page);
	}
	return err;
}

int block_commit_write2(struct _fcb * inode, struct page *page, unsigned from, unsigned to, unsigned long pageno)
{
	__block_commit_write(inode, page, from, to, pageno);
	kunmap(page);
	return 0;
}

int generic_commit_write2(struct _fcb * inode, struct page *page,
			  unsigned from, unsigned to, unsigned long pageno)
{
	loff_t pos = ((loff_t)pageno << PAGE_CACHE_SHIFT) + to;
	__block_commit_write(inode, page, from, to, pageno);
	kunmap(page);
	/* extending write: update and sync the on-disk size */
	if (pos > inode->fcb_l_filesize) {
		struct _vcb * vcb = exttwo_get_current_vcb();
		inode->fcb_l_filesize = pos;
		ext2_sync_inode(vcb, inode);
	}
	return 0;
}

/*
 * Zero the tail of the block containing `from' when a file is
 * truncated to a size that is not block-aligned.  XXX: as written this
 * only zeroes the copy in a freshly allocated page; the read buffer
 * offset and the absence of a write-back look questionable and
 * deserve review.
 */
int block_truncate_page(struct address_space *mapping, loff_t from, get_block_t *get_block)
{
	struct _fcb * inode = (void*) mapping;
	struct _fcb * fcb = e2_search_fcb(inode);
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct page *page;
	int err;
	int lbn;

	struct _vcb * vcb = exttwo_get_current_vcb();
	struct ext2_super_block * sb = vcb->vcb_l_cache;
	int i_blkbits = EXT2_BLOCK_SIZE_BITS(sb);
	blocksize = 1 << i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = index << (PAGE_CACHE_SHIFT - i_blkbits);

	page = alloc_pages(GFP_KERNEL, 0);
	err = -ENOMEM;
	if (!page)
		goto out;

	/* find the block (and its end offset `pos') containing `from' */
	pos = blocksize;
	while (offset >= pos) {
		iblock++;
		pos += blocksize;
	}

	err = 0;

	lbn = e2_map_vbn(fcb, iblock);

	myqio(READ, page_address(page)+pos, blocksize, lbn, 0, vms_block_factor(i_blkbits));

	memset(kmap(page) + offset, 0, length);
	flush_dcache_page(page);
	kunmap(page);

	err = 0;

#if 0
	UnlockPage(page);
#endif
	page_cache_release(page);
out:
	return err;
}
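
/*
 * Worked example of the arithmetic above (illustrative, 1 kB blocks):
 * from = 1536 gives offset = 1536 in the page, length = 1536 & 1023 =
 * 512, so length becomes 1024-512 = 512; the loop lands on the second
 * block of the page (pos = 2048) and bytes 1536..2047 are zeroed.
 */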

int block_write_full_page2(struct _fcb *inode, struct page *page, unsigned long pageno)
{
	struct _fcb * fcb = e2_search_fcb(inode);
	unsigned long end_index = inode->fcb_l_filesize >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int err;

	/* easy case: the page is wholly inside the file */
	if (pageno < end_index)
		return __block_write_full_page2(inode, page, pageno);

	/* the page straddles or is past the end of file */
	offset = inode->fcb_l_filesize & (PAGE_CACHE_SIZE-1);
	/* pages fully beyond EOF (or at EOF with no partial data) are an error */
	if (pageno >= end_index+1 || !offset) {
#if 0
		UnlockPage(page);
#endif
		return -EIO;
	}

	/* the page straddles EOF: zero the tail, write the valid part */
	err = __block_prepare_write(inode, page, 0, offset, pageno);
	if (!err) {
		memset(page_address(page) + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		__block_commit_write(inode, page, 0, offset, pageno);
done:
		kunmap(page);
#if 0
		UnlockPage(page);
#endif
		return err;
	}
#if 0
	ClearPageUptodate(page);
#endif
	goto done;
}

int block_write_full_page3(struct _fcb * fcb, struct page *page, unsigned long pageno)
{
	struct _fcb * inode = fcb->fcb_l_primfcb;
	unsigned long end_index = inode->fcb_l_filesize >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int err;

	/* easy case: the page is wholly inside the file */
	if (pageno < end_index)
		return __block_write_full_page2(inode, page, pageno);

	offset = inode->fcb_l_filesize & (PAGE_CACHE_SIZE-1);
	if (pageno >= end_index+1 || !offset) {
#if 0
		UnlockPage(page);
#endif
		return -EIO;
	}

	/* the page straddles EOF: zero the tail, write the valid part */
	err = __block_prepare_write(inode, page, 0, offset, pageno);
	if (!err) {
		memset(page_address(page) + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		__block_commit_write(inode, page, 0, offset, pageno);
done:
		kunmap(page);
#if 0
		UnlockPage(page);
#endif
		return err;
	}
#if 0
	ClearPageUptodate(page);
#endif
	goto done;
}

int generic_block_bmap(struct address_space *mapping, long block, get_block_t *get_block)
{
	struct buffer_head tmp;
	struct _fcb *inode = mapping->host;
	struct _vcb * vcb = exttwo_get_current_vcb();

	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	get_block(vcb, inode, block, &tmp, 0);
	return tmp.b_blocknr;
}
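
/*
 * Usage sketch (illustrative): bmap answers "where does file block N
 * live on disk?", e.g. for the FIBMAP ioctl:
 *
 *	long phys = generic_block_bmap(mapping, 0, some_get_block);
 *
 * where some_get_block stands for the filesystem's block-mapping
 * routine; a result of 0 conventionally means unmapped (a hole).
 */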

/*
 * Direct IO: map each file block to disk and issue a synchronous QIO
 * straight from/to the kiobuf.  XXX: bh.b_data is never pointed at the
 * user pages, and the buffer_mapped()/buffer_new() tests inspect a
 * zeroed on-stack bh, so this path needs review before serious use.
 */
int generic_direct_IO(int rw, struct _fcb * inode, struct kiobuf * iobuf, unsigned long blocknr, int blocksize, get_block_t * get_block)
{
	struct _fcb * fcb = e2_search_fcb(inode);
	int i, nr_blocks, retval = 0;
	unsigned long * blocks = iobuf->blocks;
	int length;
	int sts;
	int type;
	struct _iosb iosb;
	unsigned long iblock;

	length = iobuf->length;
	nr_blocks = length / blocksize;
	struct _vcb * vcb = exttwo_get_current_vcb();
	struct ext2_super_block * sb = vcb->vcb_l_cache;
	int i_blkbits = EXT2_BLOCK_SIZE_BITS(sb);
	iblock = blocknr << (PAGE_CACHE_SHIFT - i_blkbits);

	/* advance iblock with the loop so each pass maps the next file block */
	for (i = 0; i < nr_blocks; i++, iblock++) {
		struct buffer_head bh;
		struct _ucb * ucb;

		bh.b_state = 0;
		bh.b_dev = 0;
		bh.b_size = blocksize;

		if (fcb)
			blocknr = e2_map_vbn(fcb, iblock);
		else
			blocknr = iblock;
		if (blocknr == -1) {
			/* reads may not allocate; writes get a new block */
			if (rw != WRITE) goto out;
			retval = ext2_get_block(vcb, inode, iblock, &blocknr, 1, fcb);
		}

		if (retval) {
			if (!i)
				/* report error on the first block */
				goto out;
			else
				/* partial transfer: report what we managed */
				break;
		}

		if (rw == READ) {
			if (buffer_new(&bh))
				BUG();
			if (!buffer_mapped(&bh)) {
				/* hole: signal the caller to zero-fill */
				blocks[i] = -1UL;
				continue;
			}
		} else {
#if 0
			if (buffer_new(&bh))
				unmap_underlying_metadata(&bh);
#endif
			if (!buffer_mapped(&bh))
				BUG();
		}
		if (rw == READ)
			type = IO__READPBLK;
		else
			type = IO__WRITEPBLK;
		if (vcb->vcb_l_rvn) {
			struct _rvt * rvt = vcb->vcb_l_rvt;
			struct _ucb ** ucblst = rvt->rvt_l_ucblst;
			ucb = ucblst[fcb->fcb_b_fid_rvn - 1];
		} else
			ucb = vcb->vcb_l_rvt;
		sts = exe_qiow(EXT2_EF, (unsigned short)x2p->io_channel, type, &iosb, 0, 0,
			       bh.b_data, blocksize, bh.b_blocknr*vms_block_factor(i_blkbits),
			       ucb->ucb_w_fill_0, 0, 0);
	}

	iobuf->length = i * blocksize;
	/* restore the full length once all submitted blocks completed */
	iobuf->length = length;
out:
	return retval;
}

/*
 * End-of-IO handler for kiobuf-based IO: mark the buffer and let the
 * kiobuf accounting know one more buffer has completed.
 */
static void end_buffer_io_kiobuf(struct buffer_head *bh, int uptodate)
{
	struct kiobuf *kiobuf;

	mark_buffer_uptodate(bh, uptodate);

	kiobuf = bh->b_private;
	unlock_buffer(bh);
	end_kio_request(kiobuf, uptodate);
}

/*
 * Walk a list of buffers and total up the number of bytes transferred;
 * any buffer that is not up to date makes the transfer report -EIO.
 */
static int wait_kio(int rw, int nr, struct buffer_head *bh[], int size)
{
	int iosize, err;
	int i;
	struct buffer_head *tmp;

	iosize = 0;
	err = 0;

	for (i = nr; --i >= 0; ) {
		iosize += size;
		tmp = bh[i];
		if (buffer_locked(tmp)) {
			/* IO is submitted synchronously here, so there
			   is nothing left to wait on */
		}

		if (!buffer_uptodate(tmp)) {
			/* We are traversing bh'es in reverse order so
			   clearing iosize on error calculates the
			   amount of IO before the first error. */
			iosize = 0;
			err = -EIO;
		}
	}

	if (iosize)
		return iosize;
	return err;
}

/*
 * Start IO on a given list of kiobufs.  Each kiobuf must describe
 * block-aligned IO (offset and length multiples of the block size);
 * block numbers come from the array b[], one per block, with -1UL
 * meaning a hole (legal only for reads, which zero-fill instead).
 * Returns the number of bytes transferred, or an error code.
 */
int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
	       kdev_t dev, unsigned long b[], int size)
{
	int err;
	int length;
	int transferred;
	int i;
	int bufind;
	int pageind;
	int bhind;
	int offset;
	unsigned long blocknr;
	struct kiobuf * iobuf = NULL;
	struct page * map;
	struct buffer_head *tmp, **bhs = NULL;

	if (!nr)
		return 0;

	/*
	 * First, do some alignment and validity checks
	 */
	for (i = 0; i < nr; i++) {
		iobuf = iovec[i];
		if ((iobuf->offset & (size-1)) ||
		    (iobuf->length & (size-1)))
			return -EINVAL;
		if (!iobuf->nr_pages)
			panic("brw_kiovec: iobuf not initialised");
	}

	/*
	 * OK to walk down the iovec doing page IO on each page we find.
	 */
	bufind = bhind = transferred = err = 0;
	for (i = 0; i < nr; i++) {
		iobuf = iovec[i];
		offset = iobuf->offset;
		length = iobuf->length;
		iobuf->errno = 0;
		if (!bhs)
			bhs = iobuf->bh;

		for (pageind = 0; pageind < iobuf->nr_pages; pageind++) {
			map = iobuf->maplist[pageind];
			if (!map) {
				err = -EFAULT;
				goto finished;
			}

			while (length > 0) {
				blocknr = b[bufind++];
				if (blocknr == -1UL) {
					if (rw == READ) {
						/* there was a hole in the filesystem */
						memset(kmap(map) + offset, 0, size);
						flush_dcache_page(map);
						kunmap(map);

						transferred += size;
						goto skip_block;
					} else
						BUG();
				}
				tmp = bhs[bhind++];

				tmp->b_size = size;
#if 0
				set_bh_page(tmp, map, offset);
#endif
				tmp->b_this_page = tmp;

				init_buffer(tmp, end_buffer_io_kiobuf, iobuf);
				tmp->b_dev = dev;
				tmp->b_blocknr = blocknr;
				tmp->b_state = (1 << BH_Mapped) | (1 << BH_Lock) | (1 << BH_Req);

				if (rw == WRITE) {
					set_bit(BH_Uptodate, &tmp->b_state);
					clear_bit(BH_Dirty, &tmp->b_state);
				} else
					set_bit(BH_Uptodate, &tmp->b_state);

				atomic_inc(&iobuf->io_count);
				vms_submit_bh(rw, tmp);
				/*
				 * Wait for IO if we have got too much
				 */
				if (bhind >= KIO_MAX_SECTORS) {
					kiobuf_wait_for_io(iobuf);
					err = wait_kio(rw, bhind, bhs, size);
					if (err >= 0)
						transferred += err;
					else
						goto finished;
					bhind = 0;
				}

skip_block:
				length -= size;
				offset += size;

				if (offset >= PAGE_SIZE) {
					offset = 0;
					break;
				}
			}
		}
	}

	/* Is there any IO still left to submit? */
	if (bhind) {
		kiobuf_wait_for_io(iobuf);
		err = wait_kio(rw, bhind, bhs, size);
		if (err >= 0)
			transferred += err;
		else
			goto finished;
	}

finished:
	if (transferred)
		return transferred;
	return err;
}

/*
 * Start IO on a physical page.  Stubbed out on this port; callers
 * only get a warning.
 */
int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size)
{
	printk("warning, brw_page does nothing\n");
	return 0;
}

/*
 * Write the target string of a symlink into the file's first block.
 * len includes the trailing NUL, so len-1 bytes of symname are stored.
 */
int block_symlink(struct _fcb *inode, const char *symname, int len)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	int err = -ENOMEM;
	char *kaddr;

	if (!page)
		goto fail;
	err = block_prepare_write2(inode, page, 0, len-1, 0);
	if (err)
		goto fail_map;
	kaddr = page_address(page);
	memcpy(kaddr, symname, len-1);
	generic_commit_write2(inode, page, 0, len-1, 0);

	/*
	 * Notice that we are _not_ going to block here - end of page is
	 * unmapped, so this will only try to map the rest of page, see
	 * that it is unmapped (typically even will not look into inode -
	 * ->i_size will be enough for everything) and zero it out.
	 */
	err = block_read_full_page2(inode, page, 0);

	page_cache_release(page);
	if (err < 0)
		goto fail;
	{
		struct _vcb * vcb = exttwo_get_current_vcb();
		ext2_sync_inode(vcb, inode);
	}
	return 0;
fail_map:
#if 0
	UnlockPage(page);
#endif
	page_cache_release(page);
fail:
	return err;
}

#define BUFFER_BUSY_BITS	((1<<BH_Dirty) | (1<<BH_Lock))
#define buffer_busy(bh)		(atomic_read(&(bh)->b_count) | ((bh)->b_state & BUFFER_BUSY_BITS))

/*
 * Dump buffer-cache statistics.  Only the SMP LRU walk survives here,
 * and it too is disabled since there is no LRU any more.
 */
void show_buffers(void)
{
#ifdef CONFIG_SMP
	struct buffer_head * bh;
	int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
	int nlist;
	static char *buf_types[NR_LIST] = { "CLEAN", "LOCKED", "DIRTY", };
#endif

#if 0
	printk("Buffer memory:   %6dkB\n",
	       atomic_read(&buffermem_pages) << (PAGE_SHIFT-10));
	printk("Cache memory:   %6dkB\n",
	       (atomic_read(&page_cache_size) - atomic_read(&buffermem_pages)) << (PAGE_SHIFT-10));
#endif

#ifdef CONFIG_SMP
#if 0
	if (!spin_trylock(&lru_list_lock))
		return;
	for (nlist = 0; nlist < NR_LIST; nlist++) {
		found = locked = dirty = used = lastused = 0;
		bh = lru_list[nlist];
		if (!bh)
			continue;

		do {
			found++;
			if (buffer_locked(bh))
				locked++;
			if (buffer_dirty(bh))
				dirty++;
			if (atomic_read(&bh->b_count))
				used++, lastused = found;
			bh = bh->b_next_free;
		} while (bh != lru_list[nlist]);
		{
			int tmp = nr_buffers_type[nlist];
			if (found != tmp)
				printk("%9s: BUG -> found %d, reported %d\n",
				       buf_types[nlist], found, tmp);
		}
		printk("%9s: %d buffers, %lu kbyte, %d used (last=%d), "
		       "%d locked, %d dirty\n",
		       buf_types[nlist], found, size_buffers_type[nlist]>>10,
		       used, lastused, locked, dirty);
	}
	spin_unlock(&lru_list_lock);
#endif
#endif
}

/*
 * The buffer cache used to be initialised here; on this port we only
 * announce that it is gone.
 */
void __init buffer_init(unsigned long mempages)
{
	printk("%%KERNEL-I-ISNOMORE, Linux Buffer-cache is no longer used\n");
}

/*
 * Retained from the original bdflush implementation; nothing in this
 * file sleeps on it any more.
 */
DECLARE_WAIT_QUEUE_HEAD(bdflush_wait);

/* nothing to kick: IO in this file is submitted synchronously */
int block_sync_page(struct page *page)
{
	return 0;
}

/*
 * The bdflush tunables are still get/settable through this syscall
 * even though the flush daemon itself is gone: func 1 used to start
 * the daemon, even func >= 2 reads parameter (func-2)/2, odd func >= 3
 * writes it, range-checked against bdflush_min/bdflush_max.
 */
asmlinkage long sys_bdflush(int func, long data)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (func == 1) {
		/* the daemon is gone: just exit the caller */
		do_exit(0);
#if 0
		int error;
		struct mm_struct *user_mm;

		/*
		 * bdflush will spend all of its time in kernel-space,
		 * without touching user-space, so we can switch it into
		 * 'lazy TLB mode' to reduce the cost of context-switches
		 * to and from bdflush.
		 */
		user_mm = start_lazy_tlb();
#if 0
		error = sync_old_buffers();
#endif
		end_lazy_tlb(user_mm);
		return error;
#endif
	}

	if (func >= 2) {
		int i = (func-2) >> 1;
		if (i >= 0 && i < N_PARAM) {
			if ((func & 1) == 0)
				return put_user(bdf_prm.data[i], (int*)data);

			if (data >= bdflush_min[i] && data <= bdflush_max[i]) {
				bdf_prm.data[i] = data;
				return 0;
			}
		}
		return -EINVAL;
	}

	return 0;
}
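
/*
 * Usage sketch for the tuning interface above (illustrative, from user
 * space): parameter i is read with func = 2 + 2*i and written with
 * func = 3 + 2*i, so nfract (parameter 0) is read with func 2 and set
 * with func 3.
 */
#if 0
	int nfract;
	bdflush(2, (long)&nfract);	/* read parameter 0 into nfract */
	bdflush(3, 30);			/* set parameter 0 to 30% */
#endif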