#include <linux/config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
Go to the source code of this file.
Classes | |
struct | slab_s |
struct | cpucache_s |
struct | kmem_cache_s |
struct | cache_sizes |
kmem_cache_create - Create a cache.
@name: A string which is used in /proc/slabinfo to identify this cache.
@size: The size of objects to be created in this cache.
@offset: The offset to use within the page.
@flags: SLAB flags
@ctor: A constructor for the objects.
@dtor: A destructor for the objects.
Returns a ptr to the cache on success, NULL on failure. Cannot be called within an interrupt, but can be interrupted. The @ctor is run when new pages are allocated by the cache and the @dtor is run before the pages are handed back. The flags are:
SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) to catch references to uninitialised memory.
SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check for buffer overruns.
SLAB_NO_REAP - Don't automatically reap this cache when we're under memory pressure.
SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware cacheline. This can be beneficial if you're counting cycles as closely as davem. | |
#define | drain_cpu_caches(cachep) do { } while (0) |
#define | kmem_cache_alloc_one(cachep) |
#define | CHECK_NR(pg) |
#define | CHECK_PAGE(page) |
kmem_cache_t * | kmem_cache_create (const char *name, size_t size, size_t offset, unsigned long flags, void(*ctor)(void *, kmem_cache_t *, unsigned long), void(*dtor)(void *, kmem_cache_t *, unsigned long)) |
int | kmem_cache_shrink (kmem_cache_t *cachep) |
int | kmem_cache_destroy (kmem_cache_t *cachep) |
void * | kmem_cache_alloc (kmem_cache_t *cachep, int flags) |
void * | kmalloc (size_t size, int flags) |
void | kmem_cache_free (kmem_cache_t *cachep, void *objp) |
void | kfree (const void *objp) |
kmem_cache_t * | kmem_find_general_cachep (size_t size, int gfpflags) |
int fastcall | kmem_cache_reap (int gfp_mask) |
Defines | |
#define | CONFIG_DEBUG_SLAB |
#define | DEBUG 1 |
#define | STATS 1 |
#define | FORCED_DEBUG 1 |
#define | REAP_SCANLEN 10 |
#define | REAP_PERFECT 10 |
#define | BYTES_PER_WORD sizeof(void *) |
#define | CREATE_MASK |
#define | BUFCTL_END 0xffffFFFF |
#define | SLAB_LIMIT 0xffffFFFE |
#define | slab_bufctl(slabp) ((kmem_bufctl_t *)(((slab_t*)slabp)+1)) |
#define | cc_entry(cpucache) ((void **)(((cpucache_t*)(cpucache))+1)) |
#define | cc_data(cachep) ((cachep)->cpudata[smp_processor_id()]) |
#define | CACHE_NAMELEN 20 |
#define | CFLGS_OFF_SLAB 0x010000UL |
#define | CFLGS_OPTIMIZE 0x020000UL |
#define | DFLGS_GROWN 0x000001UL |
#define | OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) |
#define | OPTIMIZE(x) ((x)->flags & CFLGS_OPTIMIZE) |
#define | GROWN(x) ((x)->dflags & DFLGS_GROWN)
#define | STATS_INC_ACTIVE(x) ((x)->num_active++) |
#define | STATS_DEC_ACTIVE(x) ((x)->num_active--) |
#define | STATS_INC_ALLOCED(x) ((x)->num_allocations++) |
#define | STATS_INC_GROWN(x) ((x)->grown++) |
#define | STATS_INC_REAPED(x) ((x)->reaped++) |
#define | STATS_SET_HIGH(x) |
#define | STATS_INC_ERR(x) ((x)->errors++) |
#define | STATS_INC_ALLOCHIT(x) do { } while (0) |
#define | STATS_INC_ALLOCMISS(x) do { } while (0) |
#define | STATS_INC_FREEHIT(x) do { } while (0) |
#define | STATS_INC_FREEMISS(x) do { } while (0) |
#define | RED_MAGIC1 0x5A2CF071UL |
#define | RED_MAGIC2 0x170FC2A5UL |
#define | POISON_BYTE 0x5a |
#define | POISON_END 0xa5 |
#define | MAX_OBJ_ORDER 5 |
#define | BREAK_GFP_ORDER_HI 2 |
#define | BREAK_GFP_ORDER_LO 1 |
#define | MAX_GFP_ORDER 5 |
#define | SET_PAGE_CACHE(pg, x) ((pg)->list.next = (struct list_head *)(x)) |
#define | GET_PAGE_CACHE(pg) ((kmem_cache_t *)(pg)->list.next) |
#define | SET_PAGE_SLAB(pg, x) ((pg)->list.prev = (struct list_head *)(x)) |
#define | GET_PAGE_SLAB(pg) ((slab_t *)(pg)->list.prev) |
#define | cache_chain (cache_cache.next) |
Typedefs | |
typedef unsigned int | kmem_bufctl_t |
typedef struct slab_s | slab_t |
typedef struct cpucache_s | cpucache_t |
typedef struct cache_sizes | cache_sizes_t |
Functions | |
void __init | kmem_cache_init (void) |
void __init | kmem_cache_sizes_init (void) |
int __init | kmem_cpucache_init (void) |
__initcall (kmem_cpucache_init) |
#define BREAK_GFP_ORDER_HI 2 |
#define BYTES_PER_WORD sizeof(void *) |
#define cache_chain (cache_cache.next) |
Definition at line 380 of file vmsslab.c.
Referenced by kmem_cache_create(), kmem_cache_destroy(), and kmem_cache_init().
#define CACHE_NAMELEN 20 |
#define cc_data | ( | cachep | ) | ((cachep)->cpudata[smp_processor_id()]) |
#define cc_entry | ( | cpucache | ) | ((void **)(((cpucache_t*)(cpucache))+1)) |
#define CFLGS_OPTIMIZE 0x020000UL |
#define CHECK_NR | ( | pg | ) |
#define CHECK_PAGE | ( | page | ) |
Value:
do { \ CHECK_NR(page); \ if (!PageSlab(page)) { \ printk(KERN_ERR "kfree: bad ptr %lxh.\n", \ (unsigned long)objp); \ BUG(); \ } \ } while (0)
Definition at line 1390 of file vmsslab.c.
Referenced by kfree(), and kmem_cache_free().
#define CREATE_MASK |
Value:
(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \ SLAB_POISON | SLAB_HWCACHE_ALIGN | \ SLAB_NO_REAP | SLAB_CACHE_DMA | \ SLAB_MUST_HWCACHE_ALIGN)
Definition at line 117 of file vmsslab.c.
Referenced by kmem_cache_create().
#define DFLGS_GROWN 0x000001UL |
#define GET_PAGE_CACHE | ( | pg | ) | ((kmem_cache_t *)(pg)->list.next) |
#define kmem_cache_alloc_one | ( | cachep | ) |
Value:
({ \ struct list_head * slabs_partial, * entry; \ slab_t *slabp; \ \ slabs_partial = &(cachep)->slabs_partial; \ entry = slabs_partial->next; \ if (unlikely(entry == slabs_partial)) { \ struct list_head * slabs_free; \ slabs_free = &(cachep)->slabs_free; \ entry = slabs_free->next; \ if (unlikely(entry == slabs_free)) \ goto alloc_new_slab; \ list_del(entry); \ list_add(entry, slabs_partial); \ } \ \ slabp = list_entry(entry, slab_t, list); \ kmem_cache_alloc_one_tail(cachep, slabp); \ })
#define MAX_GFP_ORDER 5 |
#define MAX_OBJ_ORDER 5 |
#define OFF_SLAB | ( | x | ) | ((x)->flags & CFLGS_OFF_SLAB) |
#define REAP_PERFECT 10 |
#define REAP_SCANLEN 10 |
#define SET_PAGE_CACHE | ( | pg, | |||
x | ) | ((pg)->list.next = (struct list_head *)(x)) |
#define SET_PAGE_SLAB | ( | pg, | |||
x | ) | ((pg)->list.prev = (struct list_head *)(x)) |
#define slab_bufctl | ( | slabp | ) | ((kmem_bufctl_t *)(((slab_t*)slabp)+1)) |
#define STATS_INC_REAPED | ( | x | ) | ((x)->reaped++) |
#define STATS_SET_HIGH | ( | x | ) |
typedef struct cache_sizes cache_sizes_t |
typedef struct cpucache_s cpucache_t |
typedef unsigned int kmem_bufctl_t |
__initcall | ( | kmem_cpucache_init | ) |
void kfree | ( | const void * | objp | ) |
kfree - free previously allocated memory. @objp: pointer returned by kmalloc.
Don't free memory not originally allocated by kmalloc() or you will run into trouble.
Definition at line 1587 of file vmsslab.c.
References CHECK_PAGE, and GET_PAGE_CACHE.
Referenced by __brelse(), bufpost(), com_std_drvdealmem(), com_std_flushattns(), dealloc_tast(), del_sym(), exe_enq(), exe_imgact(), exe_std_iorsnwait(), exe_std_rmvtimq(), get_vm_area(), kfreebuf(), kmem_cache_destroy(), lck_deqlock(), lnm_init_prc(), lnm_searchlog(), mb_fdt_read(), mb_fdt_write(), mmg_imgreset(), mmg_purgempl(), sch_astdel(), vfree(), vms_free_irq(), and vms_request_irq().
void* kmalloc | ( | size_t | size, | |
int | flags | |||
) |
kmalloc - allocate memory. @size: how many bytes of memory are required. @flags: the type of memory to allocate.
kmalloc is the normal method of allocating memory in the kernel.
The @flags argument may be one of:
GFP_USER - Allocate memory on behalf of user. May sleep.
GFP_KERNEL - Allocate normal kernel ram. May sleep.
GFP_ATOMIC - Allocation will not sleep. Use inside interrupt handlers.
Additionally, the GFP_DMA flag may be set to indicate the memory must be suitable for DMA. This can mean different things on different platforms. For example, on i386, it means that the memory must come from the first 16MB.
Definition at line 1545 of file vmsslab.c.
References cache_sizes::cs_cachep, cache_sizes::cs_dmacachep, and cache_sizes::cs_size.
Referenced by add_sym(), bread(), com_std_setattnast(), com_std_setctrlast(), exe_create_region_32(), exe_creprc(), exe_crmpsc(), exe_dclast(), exe_enq(), exe_forcex(), exe_imgact(), exe_qio(), exe_schdwk(), exe_setimr(), exe_std_debit_bytcnt_alo(), exe_std_wrtmailbox(), get_vm_area(), getblk(), ioc_std_copy_mscp_ucb(), ioc_std_copy_ucb(), lck_snd_granted(), lnm_init_prc(), mb_fdt_read(), mb_fdt_write(), mb_iodb_vmsinit(), mmg_purgempl(), search_log_prc(), search_log_sys(), vms_init3(), and vms_request_irq().
void* kmem_cache_alloc | ( | kmem_cache_t * | cachep, | |
int | flags | |||
) |
kmem_cache_alloc - Allocate an object. @cachep: The cache to allocate from. @flags: See kmalloc().
Allocate an object from this cache. The flags are only relevant if the cache has no available objects.
Definition at line 1519 of file vmsslab.c.
Referenced by do_brk(), do_mmap_pgoff(), do_munmap(), and kmem_cache_create().
kmem_cache_t* kmem_cache_create | ( | const char * | name, | |
size_t | size, | |||
size_t | offset, | |||
unsigned long | flags, | |||
void(*)(void *, kmem_cache_t *, unsigned long) | ctor, | |||
void(*)(void *, kmem_cache_t *, unsigned long) | dtor | |||
) |
Definition at line 632 of file vmsslab.c.
References BYTES_PER_WORD, cache_chain, CACHE_NAMELEN, CFLGS_OPTIMIZE, CREATE_MASK, kmem_cache_alloc(), kmem_cache_free(), kmem_find_general_cachep(), MAX_GFP_ORDER, and MAX_OBJ_ORDER.
Referenced by kmem_cache_sizes_init().
int kmem_cache_destroy | ( | kmem_cache_t * | cachep | ) |
kmem_cache_destroy - delete a cache. @cachep: the cache to destroy.
Remove a kmem_cache_t object from the slab cache. Returns 0 on success.
It is expected this function will be called by a module when it is unloaded. This will remove the cache completely, and avoid a duplicate cache being allocated each time a module is loaded and unloaded, if the module doesn't have persistent in-kernel storage across loads and unloads.
The caller must guarantee that no one will allocate memory from the cache during the kmem_cache_destroy().
Definition at line 987 of file vmsslab.c.
References cache_chain, kfree(), and kmem_cache_free().
void kmem_cache_free | ( | kmem_cache_t * | cachep, | |
void * | objp | |||
) |
kmem_cache_free - Deallocate an object. @cachep: The cache the allocation was from. @objp: The previously allocated object.
Free an object which was previously allocated from this cache.
Definition at line 1566 of file vmsslab.c.
References CHECK_PAGE, and GET_PAGE_CACHE.
Referenced by do_mmap_pgoff(), do_munmap(), kmem_cache_create(), kmem_cache_destroy(), and sys_mprotect().
void __init kmem_cache_init | ( | void | ) |
int fastcall kmem_cache_reap | ( | int | gfp_mask | ) |
kmem_cache_reap - Reclaim memory from caches. @gfp_mask: the type of memory required.
Called from do_try_to_free_pages() and __alloc_pages()
Definition at line 1719 of file vmsslab.c.
References cpucache_s::avail, cc_data, cc_entry, DFLGS_GROWN, slab_s::inuse, slab_s::list, REAP_PERFECT, REAP_SCANLEN, and STATS_INC_REAPED.
int kmem_cache_shrink | ( | kmem_cache_t * | cachep | ) |
void __init kmem_cache_sizes_init | ( | void | ) |
Definition at line 442 of file vmsslab.c.
References BREAK_GFP_ORDER_HI, cache_sizes::cs_cachep, cache_sizes::cs_dmacachep, cache_sizes::cs_size, kmem_cache_create(), num_physpages, and OFF_SLAB.
kmem_cache_t* kmem_find_general_cachep | ( | size_t | size, | |
int | gfpflags | |||
) |
Definition at line 1601 of file vmsslab.c.
References cache_sizes::cs_cachep, cache_sizes::cs_dmacachep, and cache_sizes::cs_size.
Referenced by kmem_cache_create().