00001
00002
00003
00004
00005
00006
00007 #include <linux/config.h>
00008 #include <linux/mm.h>
00009 #include <linux/swap.h>
00010 #include <linux/swapctl.h>
00011 #include <linux/interrupt.h>
00012 #include <linux/pagemap.h>
00013 #include <linux/bootmem.h>
00014 #include <linux/slab.h>
00015 #include <linux/compiler.h>
00016
00017 #include <ssdef.h>
00018 #include <system_data_cells.h>
00019 #include <mmgdef.h>
00020 #include <irpdef.h>
00021 #include <tqedef.h>
00022 #include <dyndef.h>
00023 #include <cebdef.h>
00024 #include <misc_routines.h>
00025 #include <mmg_routines.h>
00026 #include <exe_routines.h>
00027 #include <ipl.h>
00028 #include <ipldef.h>
00029 #include <internals.h>
00030
00031 #undef VMS_MM_DEBUG
00032 #define VMS_MM_DEBUG
00033
00034 #undef OLDINT
00035 #define OLDINT
00036
00037 #ifdef __x86_64__
00038 #undef OLDINT
00039 #endif
00040
00041 #if 0
00042
00043 static const int irpsize = ((sizeof (struct _irp)>>4)+1)<<4;
00044 static const int irpmin = (( (((sizeof (struct _irp)>>4)+1)<<4) >> (4+1))<<4)+1;
00045 static const int srpsize = (( (((sizeof (struct _irp)>>4)+1)<<4) \
00046 >> (4+1))<<4);
00047 static const int lrpsize = 4096;
00048 static const int lrpmin = 2048;
00049
00050 int exe_gl_lrpsplit;
00051 int ioc_gl_lrpsplit;
00052 int exe_gl_srpsplit;
00053 int ioc_gl_srpsplit;
00054 int exe_gl_splitadr;
00055 int ioc_gl_splitadr;
00056 unsigned long long ioc_gq_irpiq;
00057 unsigned long long ioc_gq_lrpiq;
00058 unsigned long long ioc_gq_srpiq;
00059 #endif
00060
00061 #ifdef CONFIG_VMS
00062
/* The single (dummy) zone for this port: only its spinlock is used,
 * taken together with the VMS MMG spinlock around PFN-database
 * operations. */
zone_t thezone = {
	lock: SPIN_LOCK_UNLOCKED
};
00066
00067 int exe_alophycntg(unsigned long * va, unsigned long num) {
00068 signed long firstpfn;
00069
00070 firstpfn=mmg_allocontig_align(num);
00071 if (firstpfn<0) {
00072 return SS__INSFMEM;
00073 }
00074
00075
00076 }
00077
/* Legacy Linux VM accounting globals, kept so code that still
 * references them links; the VMS PFN database is the real authority
 * for page state in this port. */
int nr_swap_pages;
int nr_active_pages;
int nr_inactive_pages;
struct list_head inactive_list;
struct list_head active_list;
pg_data_t *pgdat_list;

/* Thin aliases over the generic list primitives (historic 2.4 names). */
#define memlist_init(x) INIT_LIST_HEAD(x)
#define memlist_add_head list_add
#define memlist_add_tail list_add_tail
#define memlist_del list_del
#define memlist_entry list_entry
#define memlist_next(x) ((x)->next)
#define memlist_prev(x) ((x)->prev)

/* Set while free_all_bootmem_core() runs; __free_pages_ok uses it to
 * skip the per-page PFN-database deallocation during bootmem release. */
extern int in_free_all_bootmem_core;

/* Debug switch for the (disabled) memlist printk tracing below. */
int memalcdeb=0;
00096
static void FASTCALL(__free_pages_ok (struct page *page, unsigned int order));
/*
 * __free_pages_ok - hand 2^order pages back to the VMS PFN database.
 * Takes the VMS MMG spinlock first, then the zone spinlock (the same
 * order __alloc_pages uses). While free_all_bootmem_core() is active
 * the per-page deallocation is skipped: the bootmem code releases
 * those pages to the PFN database itself.
 */
static void fastcall __free_pages_ok (struct page *page, unsigned int order)
{
	unsigned long index, page_idx, mask, flags;
	free_area_t *area;
	struct page *base;
	zone_t *zone;
	unsigned long i,tmp;

	zone = &thezone;

	/* disabled Linux LRU / buffer-cache sanity checks, kept for
	 * reference against the original 2.4 allocator */
#if 0
	if (PageLRU(page))
		lru_cache_del(page);
#endif

#if 0
	if (page->buffers)
		BUG();
	if (page->mapping)
		BUG();
#endif
	if (!VALID_PAGE(page))
		BUG();
#if 0
	if (PageSwapCache(page))
		BUG();
	if (PageLocked(page))
		BUG();
	if (PageLRU(page))
		BUG();
	if (PageActive(page))
		BUG();
	page->pfn_l_page_state &= ~((1<<PG_referenced) | (1<<PG_dirty));
#endif

	mask = (~0UL) << order;
	page_idx = page - mem_map;
	base = mem_map;

	int ipl = vmslock(&SPIN_MMG, IPL__MMG);
	spin_lock_irqsave(&zone->lock, flags);

	/* disabled buddy coalescing from the original allocator */
#if 0
	while (mask + (1 << (MAX_ORDER-1))) {
		struct page *buddy1;
		if (memalcdeb) printk("memlistdel at %x %x %x\n",page_idx,mask,(page_idx ^ -mask));
		buddy1 = base + (page_idx ^ -mask);
		memlist_del(&buddy1->list);
		mask <<= 1;
		page_idx &= mask;
	}
	if (memalcdeb) printk("\n");
#endif

	if (!in_free_all_bootmem_core)
		for(i=0;i<(1 << order);i++,page++) {
#ifdef OLDINT
			/* old interface: takes the struct page pointer */
			mmg_dallocpfn(page);
#else
			/* new interface: takes the PFN itself */
			mmg_dallocpfn(page-mem_map);
#endif

		}

	spin_unlock_irqrestore(&zone->lock, flags);
	vmsunlock(&SPIN_MMG, ipl);
}
00168
#ifndef CONFIG_DISCONTIGMEM
/*
 * _alloc_pages - contiguous-memory entry point behind alloc_pages().
 * Selects a zonelist from the GFP zone bits and defers to
 * __alloc_pages(); the zonelist is largely vestigial in this port.
 */
struct page * fastcall _alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	return __alloc_pages(gfp_mask, order,
		contig_page_data.node_zonelists+(gfp_mask & GFP_ZONEMASK));
}
#endif

/* Re-entrancy guard: __alloc_pages panics if the PFN allocator is
 * entered recursively. */
int inallocpfn=0;
00178
00179
00180
00181
/*
 * __alloc_pages - allocate 2^order pages through the VMS PFN database.
 * The Linux buddy/zonelist machinery is bypassed: a single PFN (or an
 * aligned contiguous run for order > 0) is obtained under the MMG and
 * zone spinlocks. On failure, kswapd is woken and the allocation is
 * retried forever after try_to_free_pages().
 */
struct page * fastcall __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist)
{
	unsigned long flags;
	unsigned long min;
	zone_t *zone, * classzone = 0;
	struct page * page, *tmp;
	int freed;
	signed long pfn;
	unsigned long i;

	zone = &thezone;

	/* MMG spinlock first, then the zone lock (same order as
	 * __free_pages_ok); inallocpfn catches recursive entry */
	int ipl = vmslock(&SPIN_MMG, IPL__MMG);
	spin_lock_irqsave(&zone->lock, flags);
	if (inallocpfn++) panic("mooo\n");
	if (order)
		pfn=mmg_allocontig_align(1 << order);
	else
		pfn=mmg_allocpfn();
	inallocpfn--;
	spin_unlock_irqrestore(&zone->lock, flags);
	vmsunlock(&SPIN_MMG, ipl);

	/* fast path: got the PFN(s); give each page a refcount of 1 */
	if (pfn>=0) {
		page=&mem_map[pfn];
		for(i=0,tmp=page;i<(1<<order);i++,tmp++)
			set_page_count(tmp, 1);
		return page;
	}

	printk("should not be here now\n");

	/* out of memory: kick kswapd, then reclaim and retry */
	mb();
	if (waitqueue_active(&kswapd_wait))
		wake_up_interruptible(&kswapd_wait);

rebalance:
	try_to_free_pages(classzone, gfp_mask, order);

	ipl = vmslock(&SPIN_MMG, IPL__MMG);
	spin_lock_irqsave(&zone->lock, flags);
	if (inallocpfn++) panic("mooo\n");
	if (order)
		/* NOTE(review): this retry uses mmg_allocontig() while the
		 * first attempt used mmg_allocontig_align() -- confirm the
		 * alignment requirement was dropped on purpose */
		pfn=mmg_allocontig(1 << order);
	else
		pfn=mmg_allocpfn();
	inallocpfn--;
	spin_unlock_irqrestore(&zone->lock, flags);
	vmsunlock(&SPIN_MMG, ipl);

	if (pfn>=0) {
		page=&mem_map[pfn];
		for(i=0,tmp=page;i<(1<<order);i++,tmp++)
			set_page_count(tmp, 1);
		return page;
	}

	/* disabled: yield the CPU between retries */
#if 0
	current->need_resched=1;
	__set_current_state(TASK_RUNNING);
	schedule();
#endif
	goto rebalance;
}
00248
00249
00250
00251
00252 unsigned long fastcall __get_free_pages(unsigned int gfp_mask, unsigned int order)
00253 {
00254 struct page * page;
00255
00256 page = alloc_pages(gfp_mask, order);
00257 if (!page)
00258 return 0;
00259 return (unsigned long) page_address(page);
00260 }
00261
00262 unsigned long fastcall get_zeroed_page(unsigned int gfp_mask)
00263 {
00264 struct page * page;
00265
00266 page = alloc_pages(gfp_mask, 0);
00267 if (page) {
00268 void *address = page_address(page);
00269 clear_page(address);
00270 return (unsigned long) address;
00271 }
00272 return 0;
00273 }
00274
00275 void fastcall __free_pages(struct page *page, unsigned int order)
00276 {
00277 if (!PageReserved(page) && put_page_testzero(page))
00278 __free_pages_ok(page, order);
00279 }
00280
00281 void fastcall free_pages(unsigned long addr, unsigned int order)
00282 {
00283 if (addr != 0)
00284 __free_pages(virt_to_page(addr), order);
00285 }
00286
00287
00288
00289
00290 unsigned int nr_free_pages (void)
00291 {
00292 unsigned int sum;
00293 zone_t *zone;
00294 pg_data_t *pgdat = pgdat_list;
00295
00296 sum = sch_gl_freecnt;
00297 return sum;
00298 }
00299
00300
00301
00302
/*
 * nr_free_buffer_pages - pages usable for the buffer cache.
 * Not tracked under the VMS memory manager; always reports 0.
 * (Removed the unused `pgdat` local.)
 */
unsigned int nr_free_buffer_pages (void)
{
	return 0;
}
00310
/* Convert a page count to kilobytes for the printk summaries below. */
#define K(x) ((x) << (PAGE_SHIFT-10))
00312
00313
00314
00315
00316
00317
/*
 * show_free_areas_core - dump free-memory statistics: global free and
 * highmem counts, per-zone watermarks for every node, active/inactive
 * totals, and a per-order free-list census taken under the MMG and
 * zone locks.
 */
void show_free_areas_core(pg_data_t *pgdat)
{
	unsigned int order;
	unsigned type;
	pg_data_t *tmpdat = pgdat;

	printk("Free pages: %6dkB (%6dkB HighMem)\n",
		K(nr_free_pages()),
		K(nr_free_highpages()));

	/* per-zone watermarks for every node in the chain */
	while (tmpdat) {
		zone_t *zone;
		for (zone = tmpdat->node_zones;
			zone < tmpdat->node_zones + MAX_NR_ZONES; zone++)
			printk("Zone:%s freepages:%6lukB min:%6lukB low:%6lukB "
				"high:%6lukB\n",
				zone->name,
				K(zone->free_pages),
				K(zone->pages_min),
				K(zone->pages_low),
				K(zone->pages_high));

		tmpdat = tmpdat->node_next;
	}

	printk("( Active: %d, inactive: %d, free: %d )\n",
		nr_active_pages,
		nr_inactive_pages,
		nr_free_pages());

	/* count free blocks of each order on the first node only */
	for (type = 0; type < MAX_NR_ZONES; type++) {
		struct list_head *head, *curr;
		zone_t *zone = pgdat->node_zones + type;
		unsigned long nr, total, flags;

		total = 0;
		if (zone->size) {
			int ipl = vmslock(&SPIN_MMG, IPL__MMG);
			spin_lock_irqsave(&zone->lock, flags);
			for (order = 0; order < MAX_ORDER; order++) {
				head = &(zone->free_area + order)->free_list;
				curr = head;
				nr = 0;
				/* walk the circular free list back to its head */
				for (;;) {
					curr = memlist_next(curr);
					if (curr == head)
						break;
					nr++;
				}
				total += nr * (1 << order);
				printk("%lu*%lukB ", nr, K(1UL) << order);
			}
			spin_unlock_irqrestore(&zone->lock, flags);
			vmsunlock(&SPIN_MMG, ipl);
		}
		printk("= %lukB)\n", K(total));
	}

#if 0
#ifdef SWAP_CACHE_INFO
	show_swap_cache_info();
#endif
#endif
}
00382
/* Print free-memory statistics starting at the head of the node list. */
void show_free_areas(void)
{
	show_free_areas_core(pgdat_list);
}
00387
/* Round x up to the next multiple of sizeof(long). */
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
00389
00390
00391
00392
00393
00394
00395
/*
 * free_area_init_core - build the node's mem_map (the struct page
 * array) and seed the VMS nonpaged pool.
 * Allocates lmem_map from bootmem when the caller did not supply one,
 * fills in the pg_data_t bookkeeping, calls init_nonpaged(), and marks
 * every page reserved with a zero refcount; pages are later released
 * through __free_pages_ok / free_all_bootmem.
 * The zone free lists of the original Linux allocator are NOT set up.
 */
void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
	unsigned long *zones_size, unsigned long zone_start_paddr,
	unsigned long *zholes_size, struct page *lmem_map)
{
	struct page *p;
	unsigned long i, j;
	unsigned long map_size;
	unsigned long totalpages, offset, realtotalpages;
	const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);

	if (zone_start_paddr & ~PAGE_MASK)
		BUG();

	/* total pages over all zones; zholes_size is ignored here */
	totalpages = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long size = zones_size[i];
		totalpages += size;
	}
	realtotalpages = totalpages;
	INIT_LIST_HEAD(&active_list);
	INIT_LIST_HEAD(&inactive_list);

	/* carve the struct page array out of bootmem if the caller did
	 * not hand one in, then align it */
	map_size = (totalpages + 1)*sizeof(struct page);
	if (lmem_map == (struct page *)0) {
		lmem_map = (struct page *) alloc_bootmem_node(pgdat, map_size);
		lmem_map = (struct page *)(PAGE_OFFSET +
			MAP_ALIGN((unsigned long)lmem_map - PAGE_OFFSET));
	}
	*gmap = pgdat->node_mem_map = lmem_map;
	pgdat->node_size = totalpages;
	pgdat->node_start_paddr = zone_start_paddr;
	pgdat->node_start_mapnr = (lmem_map - mem_map);
	pgdat->nr_zones = 0;

	/* hand the page range to the VMS nonpaged-pool initialization */
	void __init init_nonpaged(void *pgdat, unsigned long totalpages);
	init_nonpaged(pgdat,totalpages);

	/* disabled legacy IRP/LRP/SRP lookaside-list seeding; still uses
	 * pre-conversion exe$/ioc$ names and does not compile as-is */
#if 0

	struct _irp * irp;

	ioc_gl_lrpsplit=alloc_bootmem_node(pgdat,512*lrpsize);
	ioc_gq_lrpiq=exe$gl_lrpsplit;
	irp=&ioc_gq_lrpiq;
	irp->irp_l_ioqfl=0;
	irp->irp_l_ioqbl=0;
	for(i=0;i<512;i++) {
		void * lrp=(unsigned long)ioc_gl_lrpsplit+i*lrpsize;
		boot_insqti(irp,&ioc_gq_lrpiq);
	}


	exe_gl_splitadr=alloc_bootmem_node(pgdat,512*irpsize);
	ioc_gq_irpiq=exe$gl_splitadr;
	irp=&ioc_gq_irpiq;
	irp->irp_l_ioqfl=0;
	irp->irp_l_ioqbl=0;
	for(i=0;i<512;i++) {
		struct _irp * irp=(unsigned long)exe_gl_splitadr+i*irpsize;
		boot_insqti(irp,&ioc_gq_irpiq);
	}


	exe_gl_srpsplit=alloc_bootmem_node(pgdat,4*512*srpsize);
	ioc_gq_srpiq=exe$gl_srpsplit;
	irp=&ioc_gq_srpiq;
	irp->irp_l_ioqfl=0;
	irp->irp_l_ioqbl=0;
	for(i=0;i<4*512;i++) {
		void * irp=(unsigned long)exe_gl_splitadr+i*sizeof(struct _irp);
		boot_insqti(irp,&ioc_gq_irpiq);
	}
#endif

	/* every page starts reserved with refcount 0; the boot code
	 * releases usable pages later */
	for (p = lmem_map; p < lmem_map + totalpages; p++) {
		set_page_count(p, 0);
		SetPageReserved(p);
#if 0
		init_waitqueue_head(&p->wait);
#endif

	}

	offset = lmem_map - mem_map;
	for (i = 0; i < totalpages; i++) {
		struct page *page = mem_map + offset + i;
#if 0
		page->virtual = __va(zone_start_paddr);
#endif
		zone_start_paddr += PAGE_SIZE;
	}
}
00500
/*
 * free_area_init - single-node wrapper: build the global mem_map for
 * the contiguous-memory configuration (node 0, paddr 0, no holes).
 */
void __init free_area_init(unsigned long *zones_size)
{
	free_area_init_core(0, &contig_page_data, &mem_map, zones_size, 0, 0, 0);
}
00505
00506 static int __init setup_mem_frac(char *str)
00507 {
00508 int j = 0;
00509
00510 printk("setup_mem_frac not done\n");
00511 return 1;
00512 }
00513
00514 __setup("memfrac=", setup_mem_frac);
00515 #endif
00516
00517 int exe_alononpaged() {
00518 printk("alononpaged not implemented\n");
00519 }
00520
00521 int exe_deanonpaged() {
00522 printk("deanonpaged not implemented\n");
00523 }
00524
/*
 * Generic pool-packet header used by exe_allocate()/exe_deallocate().
 * Free packets are singly linked through gen_l_flink in address order;
 * gen_w_size is the packet size in bytes, and gen_l_poison carries the
 * 0x87654321 deallocation stamp used by the pool-poison debugging.
 * The #if 0 branch preserves the original VMS layout (back link,
 * 16-bit size, type/subtype bytes).
 */
struct _gen {
	struct _gen * gen_l_flink;	/* next free packet (forward link) */
#if 0
	struct _gen * gen_l_blink;
	unsigned short int gen_w_size;
	unsigned char gen_b_type;
	unsigned char gen_b_subtype;
#else
	int gen_w_size;			/* packet size in bytes */
#endif
	int gen_l_poison;		/* poison stamp (0x87654321 when free) */
};
00537
00538
00539 int exe_allocate(int requestsize, void ** poolhead, int alignment, unsigned int * allocatedsize, void ** returnblock) {
00540 #if 0
00541 if (requestsize&15)
00542 requestsize=((requestsize>>4)+1)<<4;
00543 #endif
00544 struct _gen * nextnext, * next, * cur = poolhead;
00545 while (cur->gen_l_flink) {
00546 next=cur->gen_l_flink;
00547
00548 if (requestsize<=next->gen_w_size) {
00549 check_packet(next,requestsize,0);
00550 #if 0
00551 poison_packet(next,requestsize,0);
00552 #endif
00553 *allocatedsize=requestsize;
00554 *returnblock=next;
00555 nextnext=next->gen_l_flink;
00556 if (requestsize<next->gen_w_size) {
00557 int newsize=next->gen_w_size-requestsize;
00558 next=(long)next+requestsize;
00559 next->gen_l_flink=nextnext;
00560 next->gen_w_size=newsize;
00561 next->gen_l_poison=0x87654321;
00562 nextnext=next;
00563 }
00564 cur->gen_l_flink=nextnext;
00565 return SS__NORMAL;
00566 }
00567 cur=next;
00568 }
00569 *allocatedsize=0;
00570 *returnblock=0;
00571 return SS__INSFMEM;
00572 }
00573
00574 int exe_deallocate(void * returnblock, void ** poolhead, int size) {
00575 #if 0
00576 if (requestsize&15)
00577 requestsize=((requestsize>>4)+1)<<4;
00578 #endif
00579 struct _gen * middle = returnblock;
00580 struct _gen * nextnext, * next, * cur = poolhead;
00581 while (cur->gen_l_flink && ((unsigned long)cur->gen$l_flink<(unsigned long)returnblock)) {
00582 cur=cur->gen_l_flink;
00583 }
00584
00585 next=cur->gen_l_flink;
00586 nextnext=next->gen_l_flink;
00587
00588 middle->gen_w_size=size;
00589 middle->gen_l_flink=nextnext;
00590 middle->gen_l_poison=0x87654321;
00591
00592 if (next && nextnext && ((unsigned long)next+next->gen_w_size)==(unsigned long)middle && ((unsigned long)middle+middle->gen$w_size)==nextnext) {
00593 next->gen_w_size+=middle->gen$w_size+nextnext->gen$w_size;
00594 next->gen_l_flink=nextnext->gen$l_flink;
00595 poison_packet(next,next->gen_w_size,1);
00596 return SS__NORMAL;
00597 }
00598
00599 if (next && ((unsigned long)next+next->gen_w_size)==(unsigned long)middle) {
00600 next->gen_w_size+=middle->gen$w_size;
00601 next->gen_l_flink=nextnext;
00602 poison_packet(next,next->gen_w_size,1);
00603 return SS__NORMAL;
00604 }
00605
00606 if (next && nextnext && ((unsigned long)middle+middle->gen_w_size)==nextnext) {
00607 middle->gen_w_size+=nextnext->gen$w_size;
00608 next->gen_l_flink=middle;
00609 poison_packet(middle,middle->gen_w_size,1);
00610 return SS__NORMAL;
00611 }
00612
00613 next->gen_l_flink=middle;
00614 middle->gen_l_flink=nextnext;
00615 poison_packet(middle,size,1);
00616
00617 return SS__NORMAL;
00618 }
00619
00620 int exe_std_allocxyz(int *alosize_p, struct _tqe **tqe_p, int type, int size) {
00621 int sts=exe_std_alononpaged(size,alosize_p,tqe_p);
00622 if (sts==SS__NORMAL) {
00623 struct _tqe * tqe=*tqe_p;
00624 tqe->tqe_w_size=*alosize_p;
00625 tqe->tqe_b_type=type;
00626 return sts;
00627 }
00628
00629
00630 }
00631
/* Allocate a buffered-I/O packet (DYN$C_BUFIO) of reqsize bytes. */
int exe_std_allocbuf (int reqsize, int *alosize_p, void **bufptr_p) {
	return exe_std_allocxyz(alosize_p,bufptr_p,DYN$C_BUFIO,reqsize);
}
00635
/* Allocate a common event block (DYN$C_CEB) sized for struct _ceb. */
int exe_std_allocceb(int *alosize_p, struct _ceb **ceb_p) {
	int size=sizeof(struct _ceb);
	return exe_std_allocxyz(alosize_p,ceb_p,DYN$C_CEB,size);
}
00640
/* Allocate an I/O request packet (DYN$C_IRP); the granted size is
 * discarded (callers only need the packet pointer). */
int exe_std_allocirp(struct _irp **irp_p) {
	int alosize_p;
	int size=sizeof(struct _irp);
	return exe_std_allocxyz(&alosize_p,irp_p,DYN$C_IRP,size);
}
00646
/* Allocate a timer queue entry (DYN$C_TQE) sized for struct _tqe. */
int exe_std_alloctqe(int *alosize_p, struct _tqe **tqe_p) {
	int size=sizeof(struct _tqe);
	return exe_std_allocxyz(alosize_p,tqe_p,DYN$C_TQE,size);
}
00651
/*
 * poison_packet - fill a pool packet with a recognizable pattern so
 * stale reuse can be detected later by check_packet().
 * deall != 0 selects the deallocation pattern (longword 0x87654321,
 * fill byte 0xbd); deall == 0 selects the allocation pattern
 * (0x12345678 / 0x42). The packet header (first 12 bytes on i386,
 * 16 elsewhere) is left intact apart from the poison longword.
 * Compiled out entirely unless VMS_MM_DEBUG is defined.
 */
int poison_packet(char * packet, int size, int deall) {
#ifdef VMS_MM_DEBUG
	int * l = (int *) packet;
	char poisonc=0x42;
	int poison=0x12345678;
	if (deall) {
		poison=0x87654321;
		poisonc=0xbd;
	}
#ifdef __i386__
	l[2]=poison;
	memset(packet+12,poisonc,size-12);
#else
	l[3]=poison;
	memset(packet+16,poisonc,size-16);
#endif
#endif
	/* Bug fix: this non-void function previously ended without a
	 * return value (undefined behavior); callers ignore the result. */
	return 0;
}
00670
/*
 * check_packet - verify that a packet still carries the poison pattern
 * written by poison_packet(); panics on any mismatch (pool corruption).
 * `deall` is inverted on entry: an allocation (deall == 0) expects the
 * packet to still hold the DEALLOCATION pattern, and vice versa.
 * Compiled out entirely unless VMS_MM_DEBUG is defined.
 */
int check_packet(char * packet, int size, int deall) {
#ifdef VMS_MM_DEBUG
	deall=!deall;	/* expect the pattern of the opposite operation */
	int * l = (int *) packet;
	char poisonc=0x42;
	int poison=0x12345678;
	if (deall) {
		poison=0x87654321;
		poisonc=0xbd;
	}
#ifdef __i386__
	if (l[2]!=poison)
		panic("poison %lx %x != %x\n",l,l[2],poison);
	char * c = packet + 12;
	size-=12;
#else
	if (l[3]!=poison)
		panic("poison %lx %x != %x\n",l,l[3],poison);
	char * c = packet + 16;
	size-=16;
#endif
	/* NOTE(review): a size smaller than the header would drive this
	 * countdown loop past the buffer -- callers pass full packet
	 * sizes today; confirm before relying on smaller values. */
	for(;size;size--,c++)
		if (*c!=poisonc) panic("poisonc %x %x %x\n",size,c,poisonc);
#endif
	/* Bug fix: this non-void function previously ended without a
	 * return value (undefined behavior); callers ignore the result. */
	return 0;
}
00696
00697