/*
 * tmem.h
 *
 * Transcendent memory
 *
 * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
 */

#ifndef _TMEM_H_
#define _TMEM_H_

#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/*
 * These are pre-defined by the Xen<->Linux ABI
 */
#define TMEM_PUT_PAGE			4
#define TMEM_GET_PAGE			5
#define TMEM_FLUSH_PAGE			6
#define TMEM_FLUSH_OBJECT		7
#define TMEM_POOL_PERSIST		1
#define TMEM_POOL_SHARED		2
#define TMEM_POOL_PRECOMPRESSED		4
#define TMEM_POOL_PAGESIZE_SHIFT	4
#define TMEM_POOL_PAGESIZE_MASK		0xf
#define TMEM_POOL_RESERVED_BITS		0x00ffff00

/*
 * Sentinels have proven very useful for debugging but can be removed
 * or disabled before final merge.
 */
#define SENTINELS
#ifdef SENTINELS
#define DECL_SENTINEL uint32_t sentinel;
#define SET_SENTINEL(_x, _y) (_x->sentinel = _y##_SENTINEL)
#define INVERT_SENTINEL(_x, _y) (_x->sentinel = ~_y##_SENTINEL)
#define ASSERT_SENTINEL(_x, _y) WARN_ON(_x->sentinel != _y##_SENTINEL)
#define ASSERT_INVERTED_SENTINEL(_x, _y) WARN_ON(_x->sentinel != ~_y##_SENTINEL)
#else
#define DECL_SENTINEL
#define SET_SENTINEL(_x, _y) do { } while (0)
#define INVERT_SENTINEL(_x, _y) do { } while (0)
#define ASSERT_SENTINEL(_x, _y) do { } while (0)
#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
#endif

#define ASSERT_SPINLOCK(_l)	WARN_ON(!spin_is_locked(_l))

/*
 * A pool is the highest-level data structure managed by tmem and
 * usually corresponds to a large independent set of pages such as
 * a filesystem.  Each pool has an id, and certain attributes and counters.
 * It also contains a set of hash buckets, each of which contains an rbtree
 * of objects and a lock to manage concurrency within the pool.
 */

#define TMEM_HASH_BUCKET_BITS	8
#define TMEM_HASH_BUCKETS	(1<<TMEM_HASH_BUCKET_BITS)

struct tmem_hashbucket {
	struct rb_root obj_rb_root;
	spinlock_t lock;
};

struct tmem_pool {
	void *client; /* "up" for some clients, avoids table lookup */
	struct list_head pool_list;
	uint32_t pool_id;
	bool persistent;
	bool shared;
	atomic_t obj_count;
	atomic_t refcount;
	struct tmem_hashbucket hashbucket[TMEM_HASH_BUCKETS];
	DECL_SENTINEL
};

#define is_persistent(_p)	(_p->persistent)
#define is_ephemeral(_p)	(!(_p->persistent))
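/*
 * Illustrative sketch (editorial addition, not part of the tmem API):
 * the persistent/shared booleans in struct tmem_pool typically come
 * from the ABI-defined flags word a client supplies at pool-creation
 * time.  The helper name below is hypothetical and the interpretation
 * of the pagesize field is an assumption; only the bit layout is taken
 * from the TMEM_POOL_* definitions above.
 */
static inline void tmem_pool_flags_sketch(uint32_t flags,
					  struct tmem_pool *pool,
					  uint32_t *pagesize_field)
{
	pool->persistent = !!(flags & TMEM_POOL_PERSIST);
	pool->shared = !!(flags & TMEM_POOL_SHARED);
	/* raw 4-bit pagesize field; interpretation is left to the host */
	*pagesize_field = (flags >> TMEM_POOL_PAGESIZE_SHIFT) &
				TMEM_POOL_PAGESIZE_MASK;
}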
/*
 * An object id ("oid") is large: 192-bits (to ensure, for example, files
 * in a modern filesystem can be uniquely identified).
 */

struct tmem_oid {
	uint64_t oid[3];
};

/* an oid of all ones is reserved to mean "invalid" */
static inline void tmem_oid_set_invalid(struct tmem_oid *oidp)
{
	oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
}

static inline bool tmem_oid_valid(struct tmem_oid *oidp)
{
	return oidp->oid[0] != -1UL || oidp->oid[1] != -1UL ||
		oidp->oid[2] != -1UL;
}

/* three-way compare, most-significant word first; returns -1, 0, or 1 */
static inline int tmem_oid_compare(struct tmem_oid *left,
					struct tmem_oid *right)
{
	int ret;

	if (left->oid[2] == right->oid[2]) {
		if (left->oid[1] == right->oid[1]) {
			if (left->oid[0] == right->oid[0])
				ret = 0;
			else if (left->oid[0] < right->oid[0])
				ret = -1;
			else
				ret = 1;
		} else if (left->oid[1] < right->oid[1])
			ret = -1;
		else
			ret = 1;
	} else if (left->oid[2] < right->oid[2])
		ret = -1;
	else
		ret = 1;
	return ret;
}

static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
{
	return hash_long(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
				TMEM_HASH_BUCKET_BITS);
}

/*
 * A tmem_obj contains an identifier (oid), pointers to the parent
 * pool and the rb_tree to which it belongs, counters, and an ordered
 * set of pampds, structured in a radix-tree-like tree.  The intermediate
 * nodes of the tree are called tmem_objnodes.
 */

struct tmem_objnode;

struct tmem_obj {
	struct tmem_oid oid;
	struct tmem_pool *pool;
	struct rb_node rb_tree_node;
	struct tmem_objnode *objnode_tree_root;
	unsigned int objnode_tree_height;
	unsigned long objnode_count;
	long pampd_count;
	void *extra; /* for private use by pampd implementation */
	DECL_SENTINEL
};

#define OBJNODE_TREE_MAP_SHIFT	6
#define OBJNODE_TREE_MAP_SIZE	(1UL << OBJNODE_TREE_MAP_SHIFT)
#define OBJNODE_TREE_MAP_MASK	(OBJNODE_TREE_MAP_SIZE-1)
#define OBJNODE_TREE_INDEX_BITS	(8 /* CHAR_BIT */ * sizeof(unsigned long))
#define OBJNODE_TREE_MAX_PATH \
		(OBJNODE_TREE_INDEX_BITS/OBJNODE_TREE_MAP_SHIFT + 2)

struct tmem_objnode {
	struct tmem_obj *obj;
	DECL_SENTINEL
	void *slots[OBJNODE_TREE_MAP_SIZE];
	unsigned int slots_in_use;
};

/* pampd abstract datatype methods provided by the PAM implementation */
struct tmem_pamops {
	void *(*create)(char *, size_t, bool, int,
			struct tmem_pool *, struct tmem_oid *, uint32_t);
	int (*get_data)(char *, size_t *, bool, void *,
			struct tmem_pool *, struct tmem_oid *, uint32_t);
	int (*get_data_and_free)(char *, size_t *, bool, void *,
			struct tmem_pool *, struct tmem_oid *, uint32_t);
	void (*free)(void *, struct tmem_pool *, struct tmem_oid *, uint32_t);
	void (*free_obj)(struct tmem_pool *, struct tmem_obj *);
	bool (*is_remote)(void *);
	void (*new_obj)(struct tmem_obj *);
	int (*replace_in_obj)(void *, struct tmem_obj *);
};
extern void tmem_register_pamops(struct tmem_pamops *m);

/* memory allocation methods provided by the host implementation */
struct tmem_hostops {
	struct tmem_obj *(*obj_alloc)(struct tmem_pool *);
	void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
	struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
	void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
};
extern void tmem_register_hostops(struct tmem_hostops *m);

/* core tmem accessor functions */
extern int tmem_put(struct tmem_pool *, struct tmem_oid *, uint32_t index,
			char *, size_t, bool, bool);
extern int tmem_get(struct tmem_pool *, struct tmem_oid *, uint32_t index,
			char *, size_t *, bool, int);
extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
			void *);
extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
			uint32_t index);
extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
extern int tmem_destroy_pool(struct tmem_pool *);
extern void tmem_new_pool(struct tmem_pool *, uint32_t);
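/*
 * Illustrative sketch (editorial addition): one way a host-side client
 * might push a buffer of data through tmem_put() using the declarations
 * above.  The meanings of the trailing bool arguments ("raw" data
 * handling and ephemeral vs. persistent treatment) are assumptions about
 * how a host such as zcache drives this interface; the helper name is
 * hypothetical.
 */
static inline int tmem_put_sketch(struct tmem_pool *pool,
				  struct tmem_oid *oidp, uint32_t index,
				  char *data, size_t size)
{
	/* assumed: raw == false; last argument reflects pool persistence */
	return tmem_put(pool, oidp, index, data, size,
			false, is_ephemeral(pool));
}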
#endif /* _TMEM_H_ */