patch-1.3.57 linux/mm/page_alloc.c
- Lines: 305
- Date: Wed Jan 10 09:28:05 1996
- Orig file: v1.3.56/linux/mm/page_alloc.c
- Orig date: Thu Jan 1 02:00:00 1970
diff -u --recursive --new-file v1.3.56/linux/mm/page_alloc.c linux/mm/page_alloc.c
@@ -0,0 +1,304 @@
+/*
+ * linux/mm/page_alloc.c
+ *
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ * Swap reorganised 29.12.95, Stephen Tweedie
+ */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/head.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/swap.h>
+#include <linux/fs.h>
+#include <linux/swapctl.h>
+
+#include <asm/dma.h>
+#include <asm/system.h> /* for cli()/sti() */
+#include <asm/segment.h> /* for memcpy_to/fromfs */
+#include <asm/bitops.h>
+#include <asm/pgtable.h>
+
+static inline void add_mem_queue(struct mem_list * head, struct mem_list * entry)
+{
+ entry->prev = head;
+ (entry->next = head->next)->prev = entry;
+ head->next = entry;
+}
+
+static inline void remove_mem_queue(struct mem_list * head, struct mem_list * entry)
+{
+ struct mem_list * next = entry->next;
+ (next->prev = entry->prev)->next = next;
+}
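
[The two routines above are the whole of the free-list plumbing: each
free_area_list entry is a sentinel node in a circular, doubly linked
list, so insertion and removal never need to special-case an empty
list. A minimal user-space sketch of the same idiom; the helper names
mirror the kernel's, but this is a standalone illustration, not kernel
code, and remove_mem_queue's head argument is unused here exactly as in
the kernel signature:

    #include <stdio.h>

    struct mem_list {
        struct mem_list *next, *prev;
    };

    static void add_mem_queue(struct mem_list *head, struct mem_list *entry)
    {
        entry->prev = head;
        (entry->next = head->next)->prev = entry;  /* splice in after head */
        head->next = entry;
    }

    static void remove_mem_queue(struct mem_list *head, struct mem_list *entry)
    {
        struct mem_list *next = entry->next;
        (next->prev = entry->prev)->next = next;   /* unlink, no empty test */
    }

    int main(void)
    {
        struct mem_list head = { &head, &head };   /* empty list points at itself */
        struct mem_list a, b;

        add_mem_queue(&head, &a);
        add_mem_queue(&head, &b);      /* list is now head -> b -> a */
        remove_mem_queue(&head, &b);   /* back to head -> a */
        printf("front is %s\n", head.next == &a ? "a" : "b");
        return 0;
    }
]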
+
+/*
+ * Free_page() adds the page to the free lists. This is optimized for
+ * fast normal cases (no error jumps taken normally).
+ *
+ * The way to optimize jumps for gcc-2.2.2 is to:
+ * - select the "normal" case and put it inside the if () { XXX }
+ * - no else-statements if you can avoid them
+ *
+ * With the above two rules, you get a straight-line execution path
+ * for the normal case, giving better asm-code.
+ *
+ * free_page() may sleep since the page being freed may be a buffer
+ * page or present in the swap cache. It will not sleep, however,
+ * for a freshly allocated page (get_free_page()).
+ */
+
+/*
+ * Buddy system. Hairy. You really aren't expected to understand this
+ */
+static inline void free_pages_ok(unsigned long addr, unsigned long order)
+{
+ unsigned long index = MAP_NR(addr) >> (1 + order);
+ unsigned long mask = PAGE_MASK << order;
+
+ addr &= mask;
+ nr_free_pages += 1 << order;
+ while (order < NR_MEM_LISTS-1) {
+ if (!change_bit(index, free_area_map[order]))
+ break;
+ remove_mem_queue(free_area_list+order, (struct mem_list *) (addr ^ (1+~mask)));
+ order++;
+ index >>= 1;
+ mask <<= 1;
+ addr &= mask;
+ }
+ add_mem_queue(free_area_list+order, (struct mem_list *) addr);
+}
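
[The merge loop is easier to follow with numbers. free_area_map[order]
keeps one bit per buddy pair (hence the >> (1 + order) when computing
index); the bit is toggled on every allocation or free of either half,
so change_bit() returning 1 means the buddy is currently free and the
pair can merge. The address arithmetic, as a standalone sketch assuming
4 kB pages (PAGE_SHIFT == 12, as on i386) and a made-up address:

    #include <stdio.h>

    #define PAGE_SHIFT 12                          /* assumption: 4 kB pages */
    #define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

    int main(void)
    {
        unsigned long addr = 0x6000;               /* hypothetical order-1 block */
        unsigned long order = 1;
        unsigned long mask = PAGE_MASK << order;

        /* 1 + ~mask == -mask is the block size; XOR-ing it flips the one
         * address bit that distinguishes a block from its buddy. */
        unsigned long buddy = addr ^ (1 + ~mask);
        printf("block 0x%lx, buddy 0x%lx\n", addr, buddy);      /* buddy 0x4000 */

        /* After a merge the pair becomes a single order-2 block: */
        order++;
        mask <<= 1;
        printf("merged block starts at 0x%lx\n", addr & mask);  /* 0x4000 */
        return 0;
    }
]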
+
+static inline void check_free_buffers(unsigned long addr)
+{
+ struct buffer_head * bh;
+
+ bh = buffer_pages[MAP_NR(addr)];
+ if (bh) {
+ struct buffer_head *tmp = bh;
+ do {
+ if (tmp->b_list == BUF_SHARED
+ && tmp->b_dev != B_FREE)
+ refile_buffer(tmp);
+ tmp = tmp->b_this_page;
+ } while (tmp != bh);
+ }
+}
+
+void free_pages(unsigned long addr, unsigned long order)
+{
+ if (MAP_NR(addr) < MAP_NR(high_memory)) {
+ unsigned long flag;
+ mem_map_t * map = mem_map + MAP_NR(addr);
+ if (map->reserved)
+ return;
+ if (map->count) {
+ save_flags(flag);
+ cli();
+ if (!--map->count) {
+ free_pages_ok(addr, order);
+ delete_from_swap_cache(addr);
+ }
+ restore_flags(flag);
+ if (map->count == 1)
+ check_free_buffers(addr);
+ return;
+ }
+ printk("Trying to free free memory (%08lx): memory probably corrupted\n",addr);
+ printk("PC = %p\n", __builtin_return_address(0));
+ return;
+ }
+}
+
+/*
+ * Some ugly macros to speed up __get_free_pages()..
+ */
+#define RMQUEUE(order, limit) \
+do { struct mem_list * queue = free_area_list+order; \
+ unsigned long new_order = order; \
+ do { struct mem_list *prev = queue, *ret; \
+ while (queue != (ret = prev->next)) { \
+ if ((unsigned long) ret < (limit)) { \
+ (prev->next = ret->next)->prev = prev; \
+ mark_used((unsigned long) ret, new_order); \
+ nr_free_pages -= 1 << order; \
+ restore_flags(flags); \
+ EXPAND(ret, order, new_order); \
+ return (unsigned long) ret; \
+ } \
+ prev = ret; \
+ } \
+ new_order++; queue++; \
+ } while (new_order < NR_MEM_LISTS); \
+} while (0)
+
+static inline int mark_used(unsigned long addr, unsigned long order)
+{
+ return change_bit(MAP_NR(addr) >> (1+order), free_area_map[order]);
+}
+
+#define EXPAND(addr,low,high) \
+do { unsigned long size = PAGE_SIZE << high; \
+ while (high > low) { \
+ high--; size >>= 1; cli(); \
+ add_mem_queue(free_area_list+high, addr); \
+ mark_used((unsigned long) addr, high); \
+ restore_flags(flags); \
+ addr = (struct mem_list *) (size + (unsigned long) addr); \
+ } mem_map[MAP_NR((unsigned long) addr)].count = 1; \
+ mem_map[MAP_NR((unsigned long) addr)].age = PAGE_INITIAL_AGE; \
+} while (0)
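
[RMQUEUE() walks upward from the requested order until it finds a free
block below limit; EXPAND() then hands back the halves that are not
needed. Note the direction of the split: at each step the lower half
goes back on the next-lower free list and addr advances into the upper
half, so the block finally returned is the topmost piece. A toy trace
of the splitting, assuming 4 kB pages and an invented block address:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL   /* assumption: 4 kB pages */

    int main(void)
    {
        unsigned long addr = 0x8000;   /* hypothetical order-2 (16 kB) block */
        unsigned long low = 0, high = 2;
        unsigned long size = PAGE_SIZE << high;

        while (high > low) {
            high--;
            size >>= 1;
            /* kernel: add_mem_queue(free_area_list + high, addr) */
            printf("order-%lu block at 0x%lx goes back on its free list\n",
                   high, addr);
            addr += size;              /* keep splitting the upper half */
        }
        printf("order-%lu page at 0x%lx is returned\n", low, addr);
        return 0;
    }

This prints that the order-1 block at 0x8000 and the order-0 block at
0xa000 are put back on their free lists, and the order-0 page at 0xb000
is returned.]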
+
+unsigned long __get_free_pages(int priority, unsigned long order, unsigned long limit)
+{
+ unsigned long flags;
+ int reserved_pages;
+
+ if (order >= NR_MEM_LISTS)
+ return 0;
+ if (intr_count && priority != GFP_ATOMIC) {
+ static int count = 0;
+ if (++count < 5) {
+ printk("gfp called nonatomically from interrupt %p\n",
+ __builtin_return_address(0));
+ priority = GFP_ATOMIC;
+ }
+ }
+ reserved_pages = 5;
+ if (priority != GFP_NFS)
+ reserved_pages = min_free_pages;
+ save_flags(flags);
+repeat:
+ cli();
+ if ((priority==GFP_ATOMIC) || nr_free_pages > reserved_pages) {
+ RMQUEUE(order, limit);
+ restore_flags(flags);
+ return 0;
+ }
+ restore_flags(flags);
+ if (priority != GFP_BUFFER && try_to_free_page(priority, limit))
+ goto repeat;
+ return 0;
+}
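
[From a caller's point of view: GFP_KERNEL requests may sleep in
try_to_free_page(), GFP_ATOMIC requests may instead dip below the
reserved-page watermark, and the limit argument bounds the address of
the returned block, which is how DMA allocations stay under the ISA
16 MB line. A hedged sketch of a typical in-kernel caller; the wrapper
macros for the common single-page and DMA cases live in <linux/mm.h>:

    /* Grab two contiguous pages anywhere in memory; ~0UL means
     * "no address limit", so every free block passes RMQUEUE's
     * (unsigned long) ret < limit test. */
    unsigned long buf = __get_free_pages(GFP_KERNEL, 1, ~0UL);
    if (!buf)
        return -ENOMEM;
    /* ... use the 8 kB at buf ... */
    free_pages(buf, 1);
]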
+
+/*
+ * Show free area list (used inside shift_scroll-lock stuff)
+ * We also calculate the percentage fragmentation. We do this by counting the
+ * memory on each free list with the exception of the first item on the list.
+ */
+void show_free_areas(void)
+{
+ unsigned long order, flags;
+ unsigned long total = 0;
+
+ printk("Free pages: %6dkB\n ( ",nr_free_pages<<(PAGE_SHIFT-10));
+ save_flags(flags);
+ cli();
+ for (order=0 ; order < NR_MEM_LISTS; order++) {
+ struct mem_list * tmp;
+ unsigned long nr = 0;
+ for (tmp = free_area_list[order].next ; tmp != free_area_list + order ; tmp = tmp->next) {
+ nr ++;
+ }
+ total += nr * ((PAGE_SIZE>>10) << order);
+ printk("%lu*%lukB ", nr, (PAGE_SIZE>>10) << order);
+ }
+ restore_flags(flags);
+ printk("= %lukB)\n", total);
+#ifdef SWAP_CACHE_INFO
+ show_swap_cache_info();
+#endif
+}
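
[For a concrete picture of the output, assuming 4 kB pages and
NR_MEM_LISTS == 6 (the i386 values of this era), the printk()s above
produce something shaped like the following; the counts are invented
but self-consistent (456 free pages = 1824 kB):

    Free pages:   1824kB
     ( 100*4kB 50*8kB 20*16kB 10*32kB 4*64kB 1*128kB = 1824kB)
]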
+
+#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
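
[LONG_ALIGN() rounds its argument up to the next multiple of
sizeof(long): with 4-byte longs, LONG_ALIGN(13) == (13+3) & ~3 == 16,
while LONG_ALIGN(16) stays 16. It is used below to keep mem_map and
each free-area bitmap long-aligned in the start_mem arena.]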
+
+/*
+ * set up the free-area data structures:
+ * - mark all pages reserved
+ * - mark all memory queues empty
+ * - clear the memory bitmaps
+ */
+unsigned long free_area_init(unsigned long start_mem, unsigned long end_mem)
+{
+ mem_map_t * p;
+ unsigned long mask = PAGE_MASK;
+ int i;
+
+ /*
+ * select nr of pages we try to keep free for important stuff
+ * with a minimum of 16 pages. This is totally arbitrary
+ */
+ i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+7);
+ if (i < 16)
+ i = 16;
+ min_free_pages = i;
+ free_pages_low = i + (i>>1);
+ free_pages_high = i + i;
+ start_mem = init_swap_cache(start_mem, end_mem);
+ mem_map = (mem_map_t *) start_mem;
+ p = mem_map + MAP_NR(end_mem);
+ start_mem = LONG_ALIGN((unsigned long) p);
+ memset(mem_map, 0, start_mem - (unsigned long) mem_map);
+ do {
+ --p;
+ p->reserved = 1;
+ } while (p > mem_map);
+
+ for (i = 0 ; i < NR_MEM_LISTS ; i++) {
+ unsigned long bitmap_size;
+ free_area_list[i].prev = free_area_list[i].next = &free_area_list[i];
+ mask += mask;
+ end_mem = (end_mem + ~mask) & mask;
+ bitmap_size = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT + i);
+ bitmap_size = (bitmap_size + 7) >> 3;
+ bitmap_size = LONG_ALIGN(bitmap_size);
+ free_area_map[i] = (unsigned int *) start_mem;
+ memset((void *) start_mem, 0, bitmap_size);
+ start_mem += bitmap_size;
+ }
+ return start_mem;
+}
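
[To make the sizing concrete, a worked example for a hypothetical 16 MB
i386 machine (PAGE_SHIFT == 12, NR_MEM_LISTS == 6, and 16 MB of managed
memory above PAGE_OFFSET); all figures are illustrative:

    #include <stdio.h>

    #define NR_MEM_LISTS 6   /* assumption: the i386 value of this era */

    int main(void)
    {
        unsigned long mem = 16UL << 20;      /* 16 MB of managed memory */
        unsigned long pages = mem >> 12;     /* 4096 page frames */
        unsigned long i = mem >> (12 + 7);   /* the 1/128-of-memory heuristic */

        printf("min_free_pages=%lu low=%lu high=%lu\n",
               i, i + (i >> 1), i + i);      /* 32, 48, 64 */
        for (int order = 0; order < NR_MEM_LISTS; order++) {
            unsigned long bits = pages >> order;
            printf("order %d bitmap: %4lu bits = %3lu bytes\n",
                   order, bits, (bits + 7) >> 3);
        }
        return 0;
    }

So min_free_pages comes out at 32 (well above the 16-page floor), the
low and high watermarks at 48 and 64 pages, and the six bitmaps at 512,
256, 128, 64, 32 and 16 bytes respectively.]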
+
+/*
+ * The tests may look silly, but they essentially make sure that
+ * no other process did a swap-in on us just as we were waiting.
+ *
+ * Also, don't bother to add to the swap cache if this page-in
+ * was due to a write access.
+ */
+void swap_in(struct task_struct * tsk, struct vm_area_struct * vma,
+ pte_t * page_table, unsigned long entry, int write_access)
+{
+ unsigned long page = __get_free_page(GFP_KERNEL);
+
+ if (pte_val(*page_table) != entry) {
+ free_page(page);
+ return;
+ }
+ if (!page) {
+ set_pte(page_table, BAD_PAGE);
+ swap_free(entry);
+ oom(tsk);
+ return;
+ }
+ read_swap_page(entry, (char *) page);
+ if (pte_val(*page_table) != entry) {
+ free_page(page);
+ return;
+ }
+ vma->vm_mm->rss++;
+ tsk->maj_flt++;
+ if (!write_access && add_to_swap_cache(page, entry)) {
+ set_pte(page_table, mk_pte(page, vma->vm_page_prot));
+ return;
+ }
+ set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
+ swap_free(entry);
+ return;
+}
+