00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018 #include <linux/config.h>
00019 #include <linux/module.h>
00020 #include <linux/slab.h>
00021 #include <linux/shm.h>
00022 #include <linux/mman.h>
00023 #include <linux/locks.h>
00024 #include <linux/pagemap.h>
00025 #include <linux/swap.h>
00026 #include <linux/smp_lock.h>
00027 #include <linux/blkdev.h>
00028 #include <linux/file.h>
00029 #include <linux/swapctl.h>
00030 #include <linux/init.h>
00031 #include <linux/mm.h>
00032 #include <linux/iobuf.h>
00033 #include <linux/compiler.h>
00034
00035 #include <asm/pgalloc.h>
00036 #include <asm/uaccess.h>
00037 #include <asm/mman.h>
00038
00039 #include <linux/highmem.h>
00040
00041 #include <fcbdef.h>
00042 #include <ipldef.h>
00043 #include <phddef.h>
00044 #include <rdedef.h>
00045 #include <misc_routines.h>
00046
00047
00048
00049
00050
00051
00052
00053
00054
00055
00056
00057
00058
00059
00060
00061
/*
 * set_page_dirty - mark @page dirty.
 *
 * Stubbed out in this port: the PG_dirty bit manipulation below is
 * compiled away, so the call currently has no effect.
 */
void fastcall set_page_dirty(struct page *page)
{
#if 0
	test_and_set_bit(PG_dirty, &page->pfn_l_page_state);
#endif
}
00068
00069
00070
00071
00072
00073
00074
00075
00076
/*
 * invalidate_inode_pages - drop clean cached pages of @inode.
 * No page cache is maintained in this port, so this is a no-op.
 */
void invalidate_inode_pages(struct inode * inode)
{
}
00080
00081
00082
00083
00084
00085
00086
00087
00088
00089
/*
 * truncate_inode_pages - discard cached pages of @mapping past @lstart.
 * No page cache is maintained in this port, so this is a no-op.
 */
void truncate_inode_pages(struct address_space * mapping, loff_t lstart)
{
}
00093
00094
00095
00096
00097
00098
/*
 * invalidate_inode_pages2 - forcibly invalidate all pages of @mapping.
 * No page cache is maintained in this port, so this is a no-op.
 */
void invalidate_inode_pages2(struct address_space * mapping)
{
}
00102
00103
00104
00105
00106
/*
 * generic_buffer_fdatasync - sync buffers of @inode in the page-index
 * range [start_idx, end_idx).  Buffer writeback is not implemented in
 * this port, so the call always reports success.
 */
int generic_buffer_fdatasync(struct _fcb *inode, unsigned long start_idx, unsigned long end_idx)
{
	/* Nothing to flush yet: pretend the range was written out. */
	return 0;
}
00113
00114
00115
00116
00117
00118
00119
00120
00121 extern int block_write_full_page2();
00122
/*
 * filemap_fdatasync - start writeback of dirty pages in @mapping.
 * Writeback is not implemented in this port; always succeeds.
 */
int filemap_fdatasync(struct address_space * mapping)
{
	/* No dirty-page list to walk yet. */
	return 0;
}
00129
00130
00131
00132
00133
00134
00135
00136
/*
 * filemap_fdatawait - wait for in-flight writeback of @mapping to
 * finish.  Writeback is not implemented in this port; always succeeds.
 */
int filemap_fdatawait(struct address_space * mapping)
{
	/* Nothing is ever in flight. */
	return 0;
}
00143
00144 #if 0
00145
00146
00147
00148
00149
00150
00151
00152
/*
 * do_generic_file_read - cacheless generic read loop (NOTE: this whole
 * function sits inside an #if 0 region and is not compiled).
 *
 * For each page-sized chunk of [*ppos, EOF) it allocates a fresh page,
 * reads it synchronously from disk and hands it to @actor until the
 * descriptor's count is exhausted or EOF is reached.  There is no page
 * cache and no readahead.
 */
void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor)
{
	struct _fcb *inode = filp->f_dentry->d_inode;
	unsigned long index, offset;
	struct page *cached_page;
	int error;

	cached_page = NULL;
	index = *ppos >> PAGE_CACHE_SHIFT;	/* page index of *ppos */
	offset = *ppos & ~PAGE_CACHE_MASK;	/* byte offset inside that page */

	for (;;) {
		struct page *page;
		unsigned long end_index, nr, ret;
		struct _fcb * fcb;

		end_index = inode->fcb_l_filesize >> PAGE_CACHE_SHIFT;

		if (index > end_index)
			break;
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			/* Last page: only the file's tail bytes are valid. */
			nr = inode->fcb_l_filesize & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		/* Bytes available in this page from the current offset. */
		nr = nr - offset;

		page = 0;

		/* No cache: always read into a freshly allocated page. */
		cached_page = alloc_pages(0, 0);

		page = cached_page;
		cached_page = NULL;

		/* NOTE(review): e2_search_fcb() result is never used. */
		fcb = e2_search_fcb(filp->f_dentry->d_inode);

		error = block_read_full_page2(filp->f_dentry->d_inode, page, index);

#if 0
		if (!offset || !filp->f_reada)
			mark_page_accessed(page);
#endif

		/* Let the actor consume up to nr bytes, then advance
		 * the file position page/offset pair. */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		/* Continue only on a full copy with bytes still wanted. */
		if (ret == nr && desc->count)
			continue;
		break;

	}

	/* Publish the final position and flag sequential access. */
	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	filp->f_reada = 1;
	if (cached_page)
		page_cache_release(cached_page);
#if 0
	UPDATE_ATIME(inode);
#endif
}
00252
/*
 * do_rms_generic_file_read - RMS variant of do_generic_file_read()
 * (NOTE: inside an #if 0 region, not compiled).
 *
 * Reads page-sized chunks of the file described by FCB @filp and feeds
 * them to @actor.  With the fcb_l_fill_5 checks disabled, end_index is
 * hard-wired to 10000 pages and the tail length comes from
 * fcb_l_efblk (512-byte blocks) — NOTE(review): confirm both before
 * re-enabling this code.
 */
void do_rms_generic_file_read(struct _fcb * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor)
{
	struct _fcb * fcb = filp;
	struct _fcb * inode=0;
	unsigned long index, offset;
	struct page *cached_page;
	int error;

	cached_page = NULL;
	index = *ppos >> PAGE_CACHE_SHIFT;	/* page index of *ppos */
	offset = *ppos & ~PAGE_CACHE_MASK;	/* byte offset inside that page */

	for (;;) {
		struct page *page;
		unsigned long end_index, nr, ret;

#if 0
		if (fcb->fcb_l_fill_5)
			end_index = inode->fcb_l_filesize >> PAGE_CACHE_SHIFT;
		else
#endif
			end_index = 10000;

		if (index > end_index)
			break;
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
#if 0
			if (fcb->fcb_l_fill_5)
				nr = inode->fcb_l_filesize & ~PAGE_CACHE_MASK;
			else
#endif
				/* Tail length derived from the end-of-file
				 * block number (512-byte blocks). */
				nr = ((fcb->fcb_l_efblk << 9) + 0 ) & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		/* Bytes available in this page from the current offset. */
		nr = nr - offset;

		page = 0;

		/* No cache: always read into a freshly allocated page. */
		cached_page = alloc_pages(0, 0);

		page = cached_page;
		cached_page = NULL;

		/* Two read paths selected by fcb_l_fill_5; its exact
		 * meaning is not visible here — TODO confirm. */
		if (fcb->fcb_l_fill_5)
			error = block_read_full_page3(filp, page, index);
		else
			error = ods2_block_read_full_page3(filp->fcb_l_wlfl, page, index);

#if 0
		if (!offset)
			mark_page_accessed(page);
#endif

		/* Let the actor consume up to nr bytes, then advance. */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		break;

	}

	/* Publish the final position. */
	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	if (cached_page)
		page_cache_release(cached_page);
#if 0
	if (inode)
		UPDATE_ATIME(inode);
#endif
}
00366 #endif
00367
00368 static ssize_t generic_file_direct_IO(int rw, struct file * filp, char * buf, size_t count, loff_t offset)
00369 {
00370 ssize_t retval;
00371 int new_iobuf, chunk_size, blocksize_mask, blocksize, blocksize_bits, iosize, progress;
00372 struct kiobuf * iobuf;
00373 struct _fcb * inode = filp->f_dentry->d_inode;
00374
00375 new_iobuf = 0;
00376 iobuf = filp->f_iobuf;
00377 if (test_and_set_bit(0, &filp->f_iobuf_lock)) {
00378
00379
00380
00381
00382 retval = alloc_kiovec(1, &iobuf);
00383 if (retval)
00384 goto out;
00385 new_iobuf = 1;
00386 }
00387
00388 #if 0
00389
00390 blocksize = 1 << inode->i_blkbits;
00391 blocksize_bits = inode->i_blkbits;
00392 #endif
00393 blocksize_mask = blocksize - 1;
00394 chunk_size = KIO_MAX_ATOMIC_IO << 10;
00395
00396 retval = -EINVAL;
00397 if ((offset & blocksize_mask) || (count & blocksize_mask))
00398 goto out_free;
00399
00400
00401
00402
00403
00404
00405 #if 0
00406 if (retval == 0)
00407 retval = fsync_inode_data_buffers(inode);
00408 #endif
00409
00410
00411 if (retval < 0)
00412 goto out_free;
00413
00414 progress = retval = 0;
00415 while (count > 0) {
00416 iosize = count;
00417 if (iosize > chunk_size)
00418 iosize = chunk_size;
00419
00420 retval = map_user_kiobuf(rw, iobuf, (unsigned long) buf, iosize);
00421 if (retval)
00422 break;
00423
00424 retval = ext2_direct_IO(rw, inode, iobuf, (offset+progress) >> blocksize_bits, blocksize);
00425
00426 if (rw == READ && retval > 0)
00427 mark_dirty_kiobuf(iobuf, retval);
00428
00429 if (retval >= 0) {
00430 count -= retval;
00431 buf += retval;
00432 progress += retval;
00433 }
00434
00435 unmap_kiobuf(iobuf);
00436
00437 if (retval != iosize)
00438 break;
00439 }
00440
00441 if (progress)
00442 retval = progress;
00443
00444 out_free:
00445 if (!new_iobuf)
00446 clear_bit(0, &filp->f_iobuf_lock);
00447 else
00448 free_kiovec(1, &iobuf);
00449 out:
00450 return retval;
00451 }
00452
00453 int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
00454 {
00455 char *kaddr;
00456 unsigned long left, count = desc->count;
00457
00458 if (size > count)
00459 size = count;
00460
00461 kaddr = kmap(page);
00462 left = __copy_to_user(desc->buf, kaddr + offset, size);
00463 kunmap(page);
00464
00465 if (left) {
00466 size -= left;
00467 desc->error = -EFAULT;
00468 }
00469 desc->count = count - size;
00470 desc->written += size;
00471 desc->buf += size;
00472 return size;
00473 }
00474
00475 #if 0
00476
00477
00478
00479
/*
 * generic_file_read - read(2) entry point (NOTE: inside an #if 0
 * region, not compiled).
 *
 * Validates the user buffer, then either runs the page-by-page reader
 * or, for O_DIRECT files, a direct-IO transfer clipped at EOF.
 * Returns bytes read or a negative errno.
 */
ssize_t generic_file_read(struct file * filp, char * buf, size_t count, loff_t *ppos)
{
	ssize_t retval;

	if ((ssize_t) count < 0)
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		goto o_direct;

	retval = -EFAULT;
	if (access_ok(VERIFY_WRITE, buf, count)) {
		retval = 0;

		if (count) {
			read_descriptor_t desc;

			desc.written = 0;
			desc.count = count;
			desc.buf = buf;
			desc.error = 0;
			do_generic_file_read(filp, ppos, &desc, file_read_actor);

			/* Bytes copied win; otherwise surface the error. */
			retval = desc.written;
			if (!retval)
				retval = desc.error;
		}
	}
out:
	return retval;

o_direct:
	{
		loff_t pos = *ppos, size;
		struct _fcb *inode = filp->f_dentry->d_inode;

		retval = 0;
		if (!count)
			goto out;
		size = inode->fcb_l_filesize;
		if (pos < size) {
			/* Clip the transfer at end of file. */
			if (pos + count > size)
				count = size - pos;
			retval = generic_file_direct_IO(READ, filp, buf, count, pos);
			if (retval > 0)
				*ppos = pos + retval;
		}
#if 0
		UPDATE_ATIME(filp->f_dentry->d_inode);
#endif
		goto out;
	}
}
00533
/*
 * rms_generic_file_read - read(2) entry point for RMS files (NOTE:
 * inside an #if 0 region, not compiled).
 *
 * Validates the user buffer then delegates to
 * do_rms_generic_file_read() with file_read_actor.
 *
 * NOTE(review): passes @filp (a struct file *) where
 * do_rms_generic_file_read() expects a struct _fcb * — confirm before
 * re-enabling this code.
 */
ssize_t rms_generic_file_read(struct file * filp, char * buf, size_t count, loff_t *ppos)
{
	ssize_t retval;

	if ((ssize_t) count < 0)
		return -EINVAL;

	retval = -EFAULT;
	if (access_ok(VERIFY_WRITE, buf, count)) {
		retval = 0;

		if (count) {
			read_descriptor_t desc;

			desc.written = 0;
			desc.count = count;
			desc.buf = buf;
			desc.error = 0;
			do_rms_generic_file_read(filp, ppos, &desc, file_read_actor);

			/* Bytes copied win; otherwise surface the error. */
			retval = desc.written;
			if (!retval)
				retval = desc.error;
		}
	}
out:
	return retval;
}
00562 #endif
00563
/*
 * file_send_actor - sendfile helper: push up to @size bytes of @page
 * (from @offset) into the output file hidden in desc->buf.
 *
 * Uses the target's sendpage op when available; otherwise kmaps the
 * page and write()s it with the address limit lifted.  Advances the
 * descriptor by the number of bytes written, which is also returned.
 */
static int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset , unsigned long size)
{
	ssize_t written;
	unsigned long count = desc->count;
	/* desc->buf smuggles the output struct file *, not a buffer. */
	struct file *file = (struct file *) desc->buf;

	if (size > count)
		size = count;

	if (file->f_op->sendpage) {
		written = file->f_op->sendpage(file, page, offset,
			size, &file->f_pos, size<count);
	} else {
		char *kaddr;
		mm_segment_t old_fs;

		/* write() validates the buffer against the user address
		 * limit; lift it so a kernel address is acceptable. */
		old_fs = get_fs();
		set_fs(KERNEL_DS);

		kaddr = kmap(page);
		written = file->f_op->write(file, kaddr + offset, size, &file->f_pos);
		kunmap(page);

		set_fs(old_fs);
	}
	if (written < 0) {
		/* Record the error; report zero progress. */
		desc->error = written;
		written = 0;
	}
	desc->count = count - written;
	desc->written += written;
	return written;
}
00597
/*
 * sys_sendfile - sendfile(2).
 *
 * The real implementation below is compiled out (#if 0); the built
 * kernel simply fails every call with -EBADF.
 */
asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t *offset, size_t count)
{
#if 0
	ssize_t retval;
	struct file * in_file, * out_file;
	struct _fcb * in_inode, * out_inode;

	/* Validate the input side: open, readable, no lock conflict. */
	retval = -EBADF;
	in_file = fget(in_fd);
	if (!in_file)
		goto out;
	if (!(in_file->f_mode & FMODE_READ))
		goto fput_in;
	retval = -EINVAL;
	in_inode = in_file->f_dentry->d_inode;
	if (!in_inode)
		goto fput_in;
	retval = locks_verify_area(FLOCK_VERIFY_READ, in_inode, in_file, in_file->f_pos, count);
	if (retval)
		goto fput_in;

	/* Validate the output side: open, writable, has a write op. */
	retval = -EBADF;
	out_file = fget(out_fd);
	if (!out_file)
		goto fput_in;
	if (!(out_file->f_mode & FMODE_WRITE))
		goto fput_out;
	retval = -EINVAL;
	if (!out_file->f_op || !out_file->f_op->write)
		goto fput_out;
	out_inode = out_file->f_dentry->d_inode;
	retval = locks_verify_area(FLOCK_VERIFY_WRITE, out_inode, out_file, out_file->f_pos, count);
	if (retval)
		goto fput_out;

	retval = 0;
	if (count) {
		read_descriptor_t desc;
		loff_t pos = 0, *ppos;

		retval = -EFAULT;
		ppos = &in_file->f_pos;
		/* An explicit offset overrides (and preserves) f_pos. */
		if (offset) {
			if (get_user(pos, offset))
				goto fput_out;
			ppos = &pos;
		}

		desc.written = 0;
		desc.count = count;
		desc.buf = (char *) out_file;	/* see file_send_actor() */
		desc.error = 0;
		do_generic_file_read(in_file, ppos, &desc, file_send_actor);

		retval = desc.written;
		if (!retval)
			retval = desc.error;
		/* Write the updated offset back to user space. */
		if (offset)
			put_user(pos, offset);
	}

fput_out:
	fput(out_file);
fput_in:
	fput(in_file);
out:
	return retval;
#else
	return -EBADF;
#endif
}
00675
00676 #ifdef __x86_64__
00677 asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd, loff_t *offset, size_t count)
00678 {
00679 printk("sendfile64 not imp\n");
00680 return -1;
00681 }
00682 #endif
00683
00684 static ssize_t do_readahead(struct file *file, unsigned long index, unsigned long nr)
00685 {
00686 unsigned long max;
00687
00688
00689 max = 0;
00690 if (index > max)
00691 return 0;
00692 max -= index;
00693 if (nr > max)
00694 nr = max;
00695
00696
00697 max = nr_inactive_pages / 2;
00698 if (nr > max)
00699 nr = max;
00700
00701 while (nr) {
00702
00703 index++;
00704 nr--;
00705 }
00706 return 0;
00707 }
00708
00709 asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
00710 {
00711 ssize_t ret;
00712 struct file *file;
00713
00714 ret = -EBADF;
00715 file = fget(fd);
00716 if (file) {
00717 if (file->f_mode & FMODE_READ) {
00718 unsigned long start = offset >> PAGE_CACHE_SHIFT;
00719 unsigned long len = (count + ((long)offset & ~PAGE_CACHE_MASK)) >> PAGE_CACHE_SHIFT;
00720 ret = do_readahead(file, start, len);
00721 }
00722 fput(file);
00723 }
00724 return ret;
00725 }
00726
00727
00728
00729
/*
 * filemap_sync_pte - sync one PTE's dirty state back to its page.
 *
 * If the PTE is present and its hardware dirty bit was set, clear the
 * bit, flush the TLB entry and mark the page dirty (set_page_dirty()
 * is currently a stub in this file).  Always returns 0.
 */
static inline int filemap_sync_pte(pte_t * ptep, struct _rde *vma,
	unsigned long address, unsigned int flags)
{
	pte_t pte = *ptep;

	if (pte_present(pte)) {
		struct page *page = pte_page(pte);
		if (VALID_PAGE(page) && !PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
			flush_tlb_page2(current->mm, address);
			set_page_dirty(page);
		}
	}
	return 0;
}
00744
/*
 * filemap_sync_pte_range - sync every PTE under @pmd inside
 * [address, address+size), clamped to one PMD's span.  @offset is the
 * base that turns the PMD-relative address back into a full virtual
 * address for the per-PTE TLB flush.  Returns the OR of the per-PTE
 * results (currently always 0).
 */
static inline int filemap_sync_pte_range(pmd_t * pmd,
	unsigned long address, unsigned long size,
	struct _rde *vma, unsigned long offset, unsigned int flags)
{
	pte_t * pte;
	unsigned long end;
	int error;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		/* Corrupt entry: report and clear rather than walk it. */
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset(pmd, address);
	/* Split the VA into PMD base (kept in offset) + intra-PMD part. */
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte(pte, vma, address + offset, flags);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	return error;
}
00774
/*
 * filemap_sync_pmd_range - walk the PMDs under @pud for
 * [address, address+size) and sync each PTE range.
 *
 * NOTE(review): the incoming @offset is overwritten below (and the
 * local "offset_not" is never used), and the bounds use PGDIR_* while
 * the loop steps by PMD_SIZE — this looks like an incomplete 4-level
 * page-table port; confirm against the pud-level walker before
 * relying on it.
 */
static inline int filemap_sync_pmd_range(pud_t * pud,
	unsigned long address, unsigned long size,
	struct _rde *vma, unsigned long offset, unsigned int flags)
{
	pmd_t * pmd;
	unsigned long offset_not, end;
	int error;

	if (pud_none(*pud))
		return 0;
	if (pud_bad(*pud)) {
		/* Corrupt entry: report and clear rather than walk it. */
		pud_ERROR(*pud);
		pud_clear(pud);
		return 0;
	}
	pmd = pmd_offset(pud, address);
	offset = address & PGDIR_MASK;
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return error;
}
00804
/*
 * filemap_sync_pud_range - walk the PUDs under @pgd for
 * [address, address+size) and sync each PMD range.
 *
 * NOTE(review): the loop advances by PUD_SIZE but clamps with
 * PGDIR_SIZE/PGDIR_MASK; on configurations where the PUD and PGDIR
 * spans differ these bounds look wrong — confirm against the
 * architecture's page-table layout.
 */
static inline int filemap_sync_pud_range(pgd_t * pgd,
	unsigned long address, unsigned long size,
	struct _rde *vma, unsigned int flags)
{
	pud_t * pud;
	unsigned long offset, end;
	int error;

	if (pgd_none(*pgd))
		return 0;
	if (pgd_bad(*pgd)) {
		/* Corrupt entry: report and clear rather than walk it. */
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		return 0;
	}
	pud = pud_offset(pgd, address);
	/* Remember the PGDIR base so lower levels can rebuild the VA. */
	offset = address & PGDIR_MASK;
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pmd_range(pud, address, end - address, vma, offset, flags);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address && (address < end));
	return error;
}
00834
/*
 * filemap_sync - write back dirty PTE state for [address,
 * address+size) of @vma by walking the current process's page tables.
 * BUGs on an empty range.  Returns the OR of the per-level results
 * (currently always 0).
 */
int filemap_sync(struct vm_area_struct * vma, unsigned long address,
	size_t size, unsigned int flags)
{
	pgd_t * dir;
	unsigned long end = address + size;
	int error = 0;
	struct mm_struct *mm =current->mm;

	dir = pgd_offset(mm, address);
	flush_cache_range(mm, end - size, end);
	if (address >= end)
		BUG();
	do {
		error |= filemap_sync_pud_range(dir, address, end - address, vma, flags);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	/* Make the cleared dirty bits visible to the MMU. */
	flush_tlb_range(mm, end - size, end);

	return error;
}
00863
00864
00865
00866 #undef vm_area_struct
/*
 * generic_file_mmap - mmap entry point for regular files.
 *
 * Mapping setup is handled elsewhere in this port, so report success.
 * (The original fell off the end of a non-void function, which is
 * undefined behaviour for any caller that uses the result.)
 */
int generic_file_mmap(struct file * file, struct vm_area_struct * vma) {
	return 0;
}
00869 #define vm_area_struct _rde
00870
00871
00872
00873
00874
00875
00876
00877
00878
00879
00880
00881
00882
00883
00884 static int msync_interval(struct _rde * vma,
00885 unsigned long start, unsigned long end, int flags)
00886 {
00887 int ret = 0;
00888 struct file * file = 0;
00889
00890 if (file && (vma->rde_l_flags & VM_SHARED)) {
00891 ret = filemap_sync(vma, start, end-start, flags);
00892
00893 if (!ret && (flags & (MS_SYNC|MS_ASYNC))) {
00894 struct _fcb * inode = file->f_dentry->d_inode;
00895
00896 #if 0
00897 down(&inode->i_sem);
00898 #endif
00899
00900 if (flags & MS_SYNC) {
00901 int err;
00902
00903 if (file->f_op && file->f_op->fsync) {
00904 err = file->f_op->fsync(file, file->f_dentry, 1);
00905 if (err && !ret)
00906 ret = err;
00907 }
00908
00909 if (err && !ret)
00910 ret = err;
00911 }
00912 #if 0
00913 up(&inode->i_sem);
00914 #endif
00915 }
00916 }
00917 return ret;
00918 }
00919
00920 asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
00921 {
00922 unsigned long end;
00923 struct _rde * vma;
00924 int unmapped_error, error = -EINVAL;
00925
00926 down_read(¤t->mm->mmap_sem);
00927 if (start & ~PAGE_MASK)
00928 goto out;
00929 len = (len + ~PAGE_MASK) & PAGE_MASK;
00930 end = start + len;
00931 if (end < start)
00932 goto out;
00933 if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
00934 goto out;
00935 error = 0;
00936 if (end == start)
00937 goto out;
00938
00939
00940
00941
00942
00943 vma = find_vma(current->pcb_l_phd,start);
00944 unmapped_error = 0;
00945 for (;;) {
00946
00947 error = -EFAULT;
00948 if (!vma)
00949 goto out;
00950
00951 if (start < vma->rde_pq_start_va) {
00952 unmapped_error = -EFAULT;
00953 start = vma->rde_pq_start_va;
00954 }
00955
00956 if (end <= (vma->rde_pq_start_va + vma->rde$q_region_size)) {
00957 if (start < end) {
00958 error = msync_interval(vma, start, end, flags);
00959 if (error)
00960 goto out;
00961 }
00962 error = unmapped_error;
00963 goto out;
00964 }
00965
00966 error = msync_interval(vma, start, (vma->rde_pq_start_va + vma->rde$q_region_size), flags);
00967 if (error)
00968 goto out;
00969 start = (vma->rde_pq_start_va + vma->rde$q_region_size);
00970 vma = 0;
00971 }
00972 out:
00973 up_read(¤t->mm->mmap_sem);
00974 return error;
00975 }
00976
00977 static inline void setup_read_behavior(struct _rde * vma,
00978 int behavior)
00979 {
00980
00981 switch(behavior) {
00982 case MADV_SEQUENTIAL:
00983 vma->rde_l_flags |= VM_SEQ_READ;
00984 break;
00985 case MADV_RANDOM:
00986 vma->rde_l_flags |= VM_RAND_READ;
00987 break;
00988 default:
00989 break;
00990 }
00991 return;
00992 }
00993
00994 static long madvise_fixup_start(struct _rde * vma,
00995 unsigned long end, int behavior)
00996 {
00997 struct _rde * n;
00998 struct mm_struct * mm = current->mm;
00999
01000 n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
01001 if (!n)
01002 return -EAGAIN;
01003 *n = *vma;
01004 n->rde_q_region_size = end - (unsigned long)n->rde$pq_start_va;
01005 setup_read_behavior(n, behavior);
01006
01007 #if 0
01008 if (n->vm_file)
01009 get_file(n->vm_file);
01010 if (n->vm_ops && n->vm_ops->open)
01011 n->vm_ops->open(n);
01012 #endif
01013
01014 lock_vma_mappings(vma);
01015 spin_lock(&mm->page_table_lock);
01016 vma->rde_pq_start_va = end;
01017
01018 insrde(n,¤t->pcb_l_phd->phd$ps_p0_va_list_flink);
01019 spin_unlock(&mm->page_table_lock);
01020 unlock_vma_mappings(vma);
01021 return 0;
01022 }
01023
01024 static long madvise_fixup_end(struct _rde * vma,
01025 unsigned long start, int behavior)
01026 {
01027 struct _rde * n;
01028 struct mm_struct * mm = current->mm;
01029
01030 n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
01031 if (!n)
01032 return -EAGAIN;
01033 *n = *vma;
01034 n->rde_pq_start_va = start;
01035
01036 setup_read_behavior(n, behavior);
01037
01038 #if 0
01039 if (n->vm_file)
01040 get_file(n->vm_file);
01041 if (n->vm_ops && n->vm_ops->open)
01042 n->vm_ops->open(n);
01043 #endif
01044 lock_vma_mappings(vma);
01045 spin_lock(&mm->page_table_lock);
01046 vma->rde_q_region_size = start - (unsigned long)vma->rde$pq_start_va;
01047
01048 insrde(n,¤t->pcb_l_phd->phd$ps_p0_va_list_flink);
01049 spin_unlock(&mm->page_table_lock);
01050 unlock_vma_mappings(vma);
01051 return 0;
01052 }
01053
01054 static long madvise_fixup_middle(struct _rde * vma,
01055 unsigned long start, unsigned long end, int behavior)
01056 {
01057 struct _rde * left, * right;
01058 struct mm_struct * mm = current->mm;
01059
01060 left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
01061 if (!left)
01062 return -EAGAIN;
01063 right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
01064 if (!right) {
01065 kmem_cache_free(vm_area_cachep, left);
01066 return -EAGAIN;
01067 }
01068 *left = *vma;
01069 *right = *vma;
01070 left->rde_q_region_size = start - (unsigned long)left->rde$pq_start_va;
01071 right->rde_pq_start_va = end;
01072
01073
01074
01075 #if 0
01076 if (vma->vm_file)
01077 atomic_add(2, &vma->vm_file->f_count);
01078
01079 if (vma->vm_ops && vma->vm_ops->open) {
01080 vma->vm_ops->open(left);
01081 vma->vm_ops->open(right);
01082 }
01083 #endif
01084
01085
01086 lock_vma_mappings(vma);
01087 spin_lock(&mm->page_table_lock);
01088 vma->rde_pq_start_va = start;
01089 vma->rde_q_region_size = end - start;
01090 setup_read_behavior(vma, behavior);
01091
01092 insrde(left,¤t->pcb_l_phd->phd$ps_p0_va_list_flink);
01093
01094 insrde(right,¤t->pcb_l_phd->phd$ps_p0_va_list_flink);
01095 spin_unlock(&mm->page_table_lock);
01096 unlock_vma_mappings(vma);
01097 return 0;
01098 }
01099
01100
01101
01102
01103
01104 static long madvise_behavior(struct _rde * vma,
01105 unsigned long start, unsigned long end, int behavior)
01106 {
01107 int error = 0;
01108
01109
01110 if (current->mm->map_count > MAX_MAP_COUNT)
01111 return -ENOMEM;
01112
01113 if (start == vma->rde_pq_start_va) {
01114 if (end == (vma->rde_pq_start_va + vma->rde$q_region_size)) {
01115 setup_read_behavior(vma, behavior);
01116
01117 } else
01118 error = madvise_fixup_start(vma, end, behavior);
01119 } else {
01120 if (end == (vma->rde_pq_start_va + vma->rde$q_region_size))
01121 error = madvise_fixup_end(vma, start, behavior);
01122 else
01123 error = madvise_fixup_middle(vma, start, end, behavior);
01124 }
01125
01126 return error;
01127 }
01128
01129
01130
01131
01132
/*
 * madvise_willneed - MADV_WILLNEED handler: would start asynchronous
 * reads for [start, end), but all the readahead machinery here is
 * compiled out.
 *
 * NOTE(review): with the #if 0 blocks removed from the build, this
 * function always returns -EIO once the RSS-limit check passes, and
 * "size" is only ever used inside disabled code — confirm before
 * re-enabling any of it.
 */
static long madvise_willneed(struct _rde * vma,
	unsigned long start, unsigned long end)
{
	long error = -EBADF;
	struct file * file;
	unsigned long size, rlim_rss;

#if 0
	if (!vma->vm_file)
		return error;
	file = vma->vm_file;
	size = (file->f_dentry->d_inode->fcb_l_filesize + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
#endif

	/* Convert the address range to page indices within the RDE. */
	start = ((start - (unsigned long)vma->rde_pq_start_va) >> PAGE_SHIFT);
	if (end > (vma->rde_pq_start_va + vma->rde$q_region_size))
		end = (vma->rde_pq_start_va + vma->rde$q_region_size);
	end = ((end - (unsigned long)vma->rde_pq_start_va) >> PAGE_SHIFT);

	/* Refuse if the readahead would blow the RSS rlimit. */
	error = -EIO;
	rlim_rss = current->rlim ?  current->rlim[RLIMIT_RSS].rlim_cur :
		LONG_MAX;
	if ((current->mm->rss + (end - start)) > rlim_rss)
		return error;

#if 0
	if (0) {

		start = CLUSTER_OFFSET(start);
		end = CLUSTER_OFFSET(end + CLUSTER_PAGES - 1);

		while ((start < end) && (start < size)) {
			error = read_cluster_nonblocking(file, start, size);
			start += CLUSTER_PAGES;
			if (error < 0)
				break;
		}
	} else {
		while ((start < end) && (start < size)) {
			error = 0;
			start++;
			if (error < 0)
				break;
		}
	}


	run_task_queue(&tq_disk);
#endif

	return error;
}
01189
01190
01191
01192
01193
01194
01195
01196
01197
01198
01199
01200
01201
01202
01203
01204
01205
01206
01207
01208
01209 static long madvise_dontneed(struct _rde * vma,
01210 unsigned long start, unsigned long end)
01211 {
01212 if (vma->rde_l_flags & VM_LOCKED)
01213 return -EINVAL;
01214
01215 zap_page_range(current->mm, start, end - start);
01216 return 0;
01217 }
01218
01219 static long madvise_vma(struct _rde * vma, unsigned long start,
01220 unsigned long end, int behavior)
01221 {
01222 long error = -EBADF;
01223
01224 switch (behavior) {
01225 case MADV_NORMAL:
01226 case MADV_SEQUENTIAL:
01227 case MADV_RANDOM:
01228 error = madvise_behavior(vma, start, end, behavior);
01229 break;
01230
01231 case MADV_WILLNEED:
01232 error = madvise_willneed(vma, start, end);
01233 break;
01234
01235 case MADV_DONTNEED:
01236 error = madvise_dontneed(vma, start, end);
01237 break;
01238
01239 default:
01240 error = -EINVAL;
01241 break;
01242 }
01243
01244 return error;
01245 }
01246
01247
01248
01249
01250
01251
01252
01253
01254
01255
01256
01257
01258
01259
01260
01261
01262
01263
01264
01265
01266
01267
01268
01269
01270
01271
01272
01273
01274
01275
01276
01277
01278
01279
01280
01281 asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior)
01282 {
01283 unsigned long end;
01284 struct _rde * vma;
01285 int unmapped_error = 0;
01286 int error = -EINVAL;
01287
01288 down_write(¤t->mm->mmap_sem);
01289
01290 if (start & ~PAGE_MASK)
01291 goto out;
01292 len = (len + ~PAGE_MASK) & PAGE_MASK;
01293 end = start + len;
01294 if (end < start)
01295 goto out;
01296
01297 error = 0;
01298 if (end == start)
01299 goto out;
01300
01301
01302
01303
01304
01305
01306 vma = find_vma(current->pcb_l_phd,start);
01307 for (;;) {
01308
01309 error = -ENOMEM;
01310 if (!vma)
01311 goto out;
01312
01313
01314 if (start < vma->rde_pq_start_va) {
01315 unmapped_error = -ENOMEM;
01316 start = vma->rde_pq_start_va;
01317 }
01318
01319
01320 if (end <= (vma->rde_pq_start_va + vma->rde$q_region_size)) {
01321 if (start < end) {
01322 error = madvise_vma(vma, start, end,
01323 behavior);
01324 if (error)
01325 goto out;
01326 }
01327 error = unmapped_error;
01328 goto out;
01329 }
01330
01331
01332 error = madvise_vma(vma, start, (vma->rde_pq_start_va + vma->rde$q_region_size), behavior);
01333 if (error)
01334 goto out;
01335 start = (vma->rde_pq_start_va + vma->rde$q_region_size);
01336 vma = 0;
01337 }
01338
01339 out:
01340 up_write(¤t->mm->mmap_sem);
01341 return error;
01342 }
01343
01344
01345
01346
01347
01348
01349
/*
 * mincore_page - residency test for page @pgoff of @vma.
 * The page-cache lookup is compiled out, so every page is reported
 * resident (returns 1 unconditionally).
 */
static unsigned char mincore_page(struct _rde * vma,
	unsigned long pgoff)
{
	unsigned char present = 0;
	struct address_space * as = 0;	/* unused while the lookup is disabled */
	struct page * page;

	page = 0;
#if 0
	if ((page) && (Page_Uptodate(page)))
#endif
	/* Unconditional: the guarding if above is compiled out. */
	present = 1;

	return present;
}
01365
/*
 * mincore_vma - fill @vec with one residency byte per page of
 * [start, end) within @vma, staging results through a temporary page
 * and copying them to user space in PAGE_SIZE chunks.
 *
 * NOTE(review): "remaining" is computed from page indices but
 * decremented by PAGE_SIZE per chunk, mirroring the 2.4 upstream
 * code — confirm the units before trusting multi-chunk results.
 */
static long mincore_vma(struct _rde * vma,
	unsigned long start, unsigned long end, unsigned char * vec)
{
	long error, i, remaining;
	unsigned char * tmp;

	error = -ENOMEM;
#if 0
	if (!vma->vm_file)
		return error;
#endif

	/* Convert addresses to page indices relative to the RDE. */
	start = ((start - (unsigned long)vma->rde_pq_start_va) >> PAGE_SHIFT);
	if (end > (vma->rde_pq_start_va + vma->rde$q_region_size))
		end = (vma->rde_pq_start_va + vma->rde$q_region_size);
	end = ((end - (unsigned long)vma->rde_pq_start_va) >> PAGE_SHIFT);

	error = -EAGAIN;
	tmp = (unsigned char *) __get_free_page(GFP_KERNEL);
	if (!tmp)
		return error;

	/* Note: comma operator — this and "error = 0" below form a
	 * single statement. */
	remaining = (end - start),

	error = 0;
	for (i = 0; remaining > 0; remaining -= PAGE_SIZE, i++) {
		int j = 0;
		long thispiece = (remaining < PAGE_SIZE) ?
			remaining : PAGE_SIZE;

		while (j < thispiece)
			tmp[j++] = mincore_page(vma, start++);

		if (copy_to_user(vec + PAGE_SIZE * i, tmp, thispiece)) {
			error = -EFAULT;
			break;
		}
	}

	free_page((unsigned long) tmp);
	return error;
}
01409
01410
01411
01412
01413
01414
01415
01416
01417
01418
01419
01420
01421
01422
01423
01424
01425
01426
01427
01428
01429
01430
01431
01432
01433
01434
01435 asmlinkage long sys_mincore(unsigned long start, size_t len,
01436 unsigned char * vec)
01437 {
01438 int index = 0;
01439 unsigned long end;
01440 struct _rde * vma;
01441 int unmapped_error = 0;
01442 long error = -EINVAL;
01443
01444 down_read(¤t->mm->mmap_sem);
01445
01446 if (start & ~PAGE_CACHE_MASK)
01447 goto out;
01448 len = (len + ~PAGE_CACHE_MASK) & PAGE_CACHE_MASK;
01449 end = start + len;
01450 if (end < start)
01451 goto out;
01452
01453 error = 0;
01454 if (end == start)
01455 goto out;
01456
01457
01458
01459
01460
01461
01462 vma = find_vma(current->pcb_l_phd,start);
01463 for (;;) {
01464
01465 error = -ENOMEM;
01466 if (!vma)
01467 goto out;
01468
01469
01470 if (start < vma->rde_pq_start_va) {
01471 unmapped_error = -ENOMEM;
01472 start = vma->rde_pq_start_va;
01473 }
01474
01475
01476 if (end <= (vma->rde_pq_start_va + vma->rde$q_region_size)) {
01477 if (start < end) {
01478 error = mincore_vma(vma, start, end,
01479 &vec[index]);
01480 if (error)
01481 goto out;
01482 }
01483 error = unmapped_error;
01484 goto out;
01485 }
01486
01487
01488 error = mincore_vma(vma, start, (vma->rde_pq_start_va + vma->rde$q_region_size), &vec[index]);
01489 if (error)
01490 goto out;
01491 index += ((unsigned long)(vma->rde_pq_start_va + vma->rde$q_region_size) - start) >> PAGE_CACHE_SHIFT;
01492 start = (vma->rde_pq_start_va + vma->rde$q_region_size);
01493 vma = 0;
01494 }
01495
01496 out:
01497 up_read(¤t->mm->mmap_sem);
01498 return error;
01499 }
01500
/*
 * remove_suid - drop setuid/setgid bits when a file is written.
 * The mode manipulation is compiled out in this port, so the call is
 * currently a no-op.
 */
inline void remove_suid(struct _fcb *inode)
{
#if 0
	unsigned int mode;

	/* Pick out S_ISUID, plus S_ISGID when group-exec is set. */
	mode = (inode->i_mode & S_IXGRP)*(S_ISGID/S_IXGRP) | S_ISUID;

	/* Keep only the bits actually present on the inode. */
	mode &= inode->i_mode;
	if (mode && !capable(CAP_FSETID)) {
		inode->i_mode &= ~mode;
		mark_inode_dirty(inode);
	}
#endif
}
01517
01518
01519
01520
01521
01522
01523
01524
01525
01526
01527
01528
01529
01530
01531
01532
/*
 * generic_file_write - write 'count' bytes from user buffer 'buf' into the
 * file at offset *ppos, one page-sized chunk at a time.
 *
 * @file:  open file; f_dentry->d_inode is treated as a struct _fcb (VMS FCB).
 * @buf:   user-space source buffer (read access verified up front).
 * @count: number of bytes requested.
 * @ppos:  file position; updated to the position after the last byte written.
 *
 * Returns the number of bytes written, or a negative errno
 * (-EINVAL, -EFAULT, -EFBIG, -ENOSPC, -ENOMEM, or an error from the
 * prepare/commit helpers). O_DIRECT writes bypass the page loop entirely
 * (see the o_direct: path). Unlike upstream Linux, each iteration works on
 * a freshly allocated page rather than a page-cache page, and i_sem
 * serialization is compiled out (#if 0) — presumably locking is handled
 * elsewhere in this port; TODO confirm.
 */
ssize_t
generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
{
	struct _fcb *inode = file->f_dentry->d_inode;
	unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
	loff_t pos;
	struct page *page, *cached_page;
	ssize_t written;
	long status = 0;
	int err;
	unsigned bytes;

	/* a count whose sign bit is set is a caller bug, not a huge write */
	if ((ssize_t) count < 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	cached_page = NULL;

#if 0
	down(&inode->i_sem);
#endif

	pos = *ppos;
	err = -EINVAL;
	if (pos < 0)
		goto out;

	/* report (and clear) any asynchronous error noted on the file */
	err = file->f_error;
	if (err) {
		file->f_error = 0;
		goto out;
	}

	written = 0;

	/* O_APPEND: ignore *ppos and write at end-of-file */
	if ( file->f_flags & O_APPEND)
		pos = inode->fcb_l_filesize;

	/*
	 * Enforce RLIMIT_FSIZE: writing at or past the limit raises SIGXFSZ;
	 * a write straddling the limit is silently truncated to fit.
	 */
	err = -EFBIG;

	if (limit != RLIM_INFINITY) {
		if (pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			goto out;
		}
		if (pos > 0xFFFFFFFFULL || count > limit - (u32)pos) {

			count = limit - (u32)pos;
		}
	}

	/*
	 * Without O_LARGEFILE a file may not grow past MAX_NON_LFS (2GB-1);
	 * same signal-or-truncate treatment as the rlimit above.
	 */
	if ( pos + count > MAX_NON_LFS && !(file->f_flags&O_LARGEFILE)) {
		if (pos >= MAX_NON_LFS) {
			send_sig(SIGXFSZ, current, 0);
			goto out;
		}
		if (count > MAX_NON_LFS - (u32)pos) {

			count = MAX_NON_LFS - (u32)pos;
		}
	}

	/*
	 * Upstream distinguishes regular files (s_maxbytes check) from block
	 * devices (read-only / device-size check). Here the regular-file arm
	 * is taken unconditionally (if (1)) and both checks are compiled out
	 * or dead; the else-arm is retained for reference only.
	 */
	if (1) {
#if 0

		if (pos >= inode->i_sb->s_maxbytes)
		{
			if (count || pos > inode->i_sb->s_maxbytes) {
				send_sig(SIGXFSZ, current, 0);
				err = -EFBIG;
				goto out;
			}

		}

		if (pos + count > inode->i_sb->s_maxbytes)
			count = inode->i_sb->s_maxbytes - pos;
#endif
	} else {
#if 0

		if (is_read_only(inode->i_rdev)) {
			err = -EPERM;
			goto out;
		}
#endif
		/* device arm: cannot write past the end of a fixed-size object */
		if (pos >= inode->fcb_l_filesize) {
			if (count || pos > inode->fcb_l_filesize) {
				err = -ENOSPC;
				goto out;
			}
		}

		if (pos + count > inode->fcb_l_filesize)
			count = inode->fcb_l_filesize - pos;
	}

	err = 0;
	if (count == 0)
		goto out;

	/* drop suid/sgid bits (currently a no-op stub in this port) */
	remove_suid(inode);
#if 0
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
#endif
	mark_inode_dirty_sync(inode);

	if (file->f_flags & O_DIRECT)
		goto o_direct;

	/* buffered path: copy one page-aligned chunk per iteration */
	do {
		unsigned long index, offset;
		long page_fault;
		char *kaddr;

		/*
		 * offset = byte offset within the page, index = page number;
		 * a chunk never crosses a page boundary.
		 */
		offset = (pos & (PAGE_CACHE_SIZE -1));
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * Touch the first and last source byte now so that any user
		 * page faults happen before we hold the kmapped page,
		 * reducing the chance of faulting mid-copy.
		 */
		{ volatile unsigned char dummy;
			__get_user(dummy, buf);
			__get_user(dummy, buf+bytes-1);
		}

		status = -ENOMEM;
		/* fresh page per chunk — no page cache in this port */
		page = alloc_page(0);
		if (!page)
			break;

#if 0

		if (!PageLocked(page)) {
			PAGE_BUG(page);
		}
#endif

		kaddr = kmap(page);
		/* prepare -> copy from user -> commit, in that order */
		status = block_prepare_write2(inode, page, offset, offset+bytes, index);
		if (status)
			goto sync_failure;
		page_fault = __copy_from_user(kaddr+offset, buf, bytes);
		flush_dcache_page(page);
		status = generic_commit_write2(inode, page, offset, offset+bytes, index);
		/* commit runs even on a partial copy; only then bail out */
		if (page_fault)
			goto fail_write;
		if (!status)
			status = bytes;

		if (status >= 0) {
			written += status;
			count -= status;
			pos += status;
			buf += status;
		}
unlock:		/* common per-chunk cleanup (also reached from fail_write) */
		kunmap(page);

#if 0
		SetPageReferenced(page);
		UnlockPage(page);
#endif
		page_cache_release(page);

		if (status < 0)
			break;
	} while (count);
done:
	*ppos = pos;

	if (cached_page)
		page_cache_release(cached_page);

#if 0
	if (status >= 0) {
		if ((file->f_flags & O_SYNC) )
			status = generic_osync_inode(inode, OSYNC_METADATA|OSYNC_DATA);
	}
#endif

out_status:
	/* a short write still succeeds; only report status if nothing was written */
	err = written ? written : status;
out:

#if 0
	up(&inode->i_sem);
#endif
	return err;
fail_write:
	/* user buffer faulted mid-copy: fail this chunk, clean up via unlock */
	status = -EFAULT;
	goto unlock;

sync_failure:
	/* prepare_write failed: release the page and undo any over-extension */
	kunmap(page);
#if 0
	UnlockPage(page);
#endif
	page_cache_release(page);
	if (pos + bytes > inode->fcb_l_filesize)
		vmtruncate(inode, inode->fcb_l_filesize);
	goto done;

o_direct:
	/* O_DIRECT: hand the whole request to the direct-IO engine */
	written = generic_file_direct_IO(WRITE, file, (char *) buf, count, pos);
	if (written > 0) {
		loff_t end = pos + written;
		/* grow the recorded file size if the write extended the file */
		if (end > inode->fcb_l_filesize && 1) {
			inode->fcb_l_filesize = end;
			mark_inode_dirty(inode);
		}
		*ppos = end;

	}

#if 0
	if (written >= 0 && file->f_flags & O_SYNC)
		status = generic_osync_inode(inode, OSYNC_METADATA);
#endif
	goto out_status;
}
01792
01793 void __init page_cache_init(unsigned long mempages)
01794 {
01795 printk("%%KERNEL-I-ISNOMORE, Linux Page-cache is no longer used\n");
01796 }
01797