patch-2.2.11 linux/mm/page_alloc.c
- Lines: 75
- Date: Mon Aug 9 12:04:41 1999
- Orig file: v2.2.10/linux/mm/page_alloc.c
- Orig date: Thu May 6 14:32:06 1999
diff -u --recursive --new-file v2.2.10/linux/mm/page_alloc.c linux/mm/page_alloc.c
@@ -155,12 +155,12 @@
change_bit((index) >> (1+(order)), (area)->map)
#define CAN_DMA(x) (PageDMA(x))
#define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT))
-#define RMQUEUE(order, gfp_mask) \
+#define RMQUEUE_DMA(order) \
do { struct free_area_struct * area = free_area+order; \
unsigned long new_order = order; \
do { struct page *prev = memory_head(area), *ret = prev->next; \
while (memory_head(area) != ret) { \
- if (!(gfp_mask & __GFP_DMA) || CAN_DMA(ret)) { \
+ if (CAN_DMA(ret)) { \
unsigned long map_nr; \
(prev->next = ret->next)->prev = prev; \
map_nr = ret - mem_map; \
@@ -176,6 +176,45 @@
new_order++; area++; \
} while (new_order < NR_MEM_LISTS); \
} while (0)
+#define RMQUEUE_NODMA(order) \
+do { struct free_area_struct * area = free_area+order; \
+ unsigned long new_order = order; \
+ do { struct page *prev = memory_head(area), *ret = prev->next; \
+ while (memory_head(area) != ret) { \
+ if (!CAN_DMA(ret)) { \
+ unsigned long map_nr; \
+ (prev->next = ret->next)->prev = prev; \
+ map_nr = ret - mem_map; \
+ MARK_USED(map_nr, new_order, area); \
+ nr_free_pages -= 1 << order; \
+ EXPAND(ret, map_nr, order, new_order, area); \
+ spin_unlock_irqrestore(&page_alloc_lock, flags); \
+ return ADDRESS(map_nr); \
+ } \
+ prev = ret; \
+ ret = ret->next; \
+ } \
+ new_order++; area++; \
+ } while (new_order < NR_MEM_LISTS); \
+} while (0)
+#define RMQUEUE_ANY(order) \
+do { struct free_area_struct * area = free_area+order; \
+ unsigned long new_order = order; \
+ do { struct page *prev = memory_head(area), *ret = prev->next; \
+ if (memory_head(area) != ret) { \
+ unsigned long map_nr; \
+ (prev->next = ret->next)->prev = prev; \
+ map_nr = ret - mem_map; \
+ MARK_USED(map_nr, new_order, area); \
+ nr_free_pages -= 1 << order; \
+ EXPAND(ret, map_nr, order, new_order, area); \
+ spin_unlock_irqrestore(&page_alloc_lock, flags); \
+ return ADDRESS(map_nr); \
+ \
+ } \
+ new_order++; area++; \
+ } while (new_order < NR_MEM_LISTS); \
+} while (0)
#define EXPAND(map,index,low,high,area) \
do { unsigned long size = 1 << high; \
	while (high > low) { \
@@ -236,7 +275,12 @@
}
ok_to_allocate:
spin_lock_irqsave(&page_alloc_lock, flags);
- RMQUEUE(order, gfp_mask);
+ if (gfp_mask & __GFP_DMA)
+ RMQUEUE_DMA(order);
+ else {
+ RMQUEUE_NODMA(order);
+ RMQUEUE_ANY(order);
+ }
spin_unlock_irqrestore(&page_alloc_lock, flags);

/*
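
What the patch does: the old RMQUEUE(order, gfp_mask) macro handled DMA and non-DMA requests in a single scan, so an ordinary allocation would happily take a DMA-capable page even while plenty of normal memory was still free. The patch splits it into RMQUEUE_DMA (DMA-capable pages only), RMQUEUE_NODMA (non-DMA pages only) and RMQUEUE_ANY (first free block of sufficient order, regardless of type), and __get_free_pages() now dispatches on __GFP_DMA: DMA requests use RMQUEUE_DMA, while everything else tries RMQUEUE_NODMA first and falls back to RMQUEUE_ANY only when no normal page is left. The user-space sketch below illustrates just that fallback policy; the page layout, the linear free-list scan and the names rmqueue()/alloc_page() are invented stand-ins, not the kernel's buddy-allocator code.

/*
 * User-space sketch of the allocation policy above -- NOT kernel code.
 * The page layout, the scan and all names here are invented for
 * illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

struct page {
	bool dma_capable;	/* stands in for PageDMA(page) */
	bool free;
};

/* Take the first free page matching the policy; -1 if none. */
static int rmqueue(struct page *pages, int n, bool want_dma, bool any)
{
	for (int i = 0; i < n; i++) {
		if (!pages[i].free)
			continue;
		if (any || pages[i].dma_capable == want_dma) {
			pages[i].free = false;
			return i;
		}
	}
	return -1;
}

/* Mirrors the new dispatch in __get_free_pages(). */
static int alloc_page(struct page *pages, int n, bool gfp_dma)
{
	if (gfp_dma)
		return rmqueue(pages, n, true, false);	/* RMQUEUE_DMA */
	int i = rmqueue(pages, n, false, false);	/* RMQUEUE_NODMA */
	if (i < 0)
		i = rmqueue(pages, n, false, true);	/* RMQUEUE_ANY */
	return i;
}

int main(void)
{
	struct page pages[4] = {
		{ .dma_capable = true,  .free = true },	/* page 0 */
		{ .dma_capable = true,  .free = true },	/* page 1 */
		{ .dma_capable = false, .free = true },	/* page 2 */
		{ .dma_capable = false, .free = true },	/* page 3 */
	};

	/* Ordinary allocations drain non-DMA pages first (2, then 3), */
	printf("alloc -> %d\n", alloc_page(pages, 4, false));
	printf("alloc -> %d\n", alloc_page(pages, 4, false));
	/* then fall back to DMA-capable memory (0). */
	printf("alloc -> %d\n", alloc_page(pages, 4, false));
	/* A __GFP_DMA request still finds page 1. */
	printf("dma   -> %d\n", alloc_page(pages, 4, true));
	return 0;
}

Run as-is, the sketch hands out the two non-DMA pages first, then falls back to a DMA-capable page, while a __GFP_DMA request still succeeds; the kernel gets the same ordering by running RMQUEUE_NODMA before RMQUEUE_ANY under the page_alloc_lock.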