patch-1.3.83 linux/mm/memory.c
Next file: linux/mm/mprotect.c
Previous file: linux/mm/filemap.c
Back to the patch index
Back to the overall index
- Lines: 159
- Date: Wed Apr 3 14:12:34 1996
- Orig file: v1.3.82/linux/mm/memory.c
- Orig date: Fri Mar 22 14:05:44 1996
diff -u --recursive --new-file v1.3.82/linux/mm/memory.c linux/mm/memory.c
@@ -135,9 +135,10 @@
printk("%s trying to clear kernel page-directory: not good\n", tsk->comm);
return;
}
+ flush_cache_mm(tsk->mm);
for (i = 0 ; i < USER_PTRS_PER_PGD ; i++)
free_one_pgd(page_dir + i);
- invalidate_mm(tsk->mm);
+ flush_tlb_mm(tsk->mm);
}
/*
@@ -156,7 +157,8 @@
printk("%s trying to free kernel page-directory: not good\n", tsk->comm);
return;
}
- invalidate_mm(tsk->mm);
+ flush_cache_mm(tsk->mm);
+ flush_tlb_mm(tsk->mm);
SET_PAGE_DIR(tsk, swapper_pg_dir);
tsk->mm->pgd = swapper_pg_dir; /* or else... */
for (i = 0 ; i < USER_PTRS_PER_PGD ; i++)
@@ -171,9 +173,10 @@
if (!(new_pg = pgd_alloc()))
return -ENOMEM;
page_dir = pgd_offset(&init_mm, 0);
+ flush_cache_mm(tsk->mm);
memcpy(new_pg + USER_PTRS_PER_PGD, page_dir + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof (pgd_t));
- invalidate_mm(tsk->mm);
+ flush_tlb_mm(tsk->mm);
SET_PAGE_DIR(tsk, new_pg);
tsk->mm->pgd = new_pg;
return 0;
@@ -285,6 +288,8 @@
cow = (vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE;
src_pgd = pgd_offset(src, address);
dst_pgd = pgd_offset(dst, address);
+ flush_cache_range(src, vma->vm_start, vma->vm_end);
+ flush_cache_range(dst, vma->vm_start, vma->vm_end);
while (address < end) {
error = copy_pmd_range(dst_pgd++, src_pgd++, address, end - address, cow);
if (error)
@@ -292,8 +297,8 @@
address = (address + PGDIR_SIZE) & PGDIR_MASK;
}
/* Note that the src ptes get c-o-w treatment, so they change too. */
- invalidate_range(src, vma->vm_start, vma->vm_end);
- invalidate_range(dst, vma->vm_start, vma->vm_end);
+ flush_tlb_range(src, vma->vm_start, vma->vm_end);
+ flush_tlb_range(dst, vma->vm_start, vma->vm_end);
return error;
}
@@ -373,12 +378,13 @@
unsigned long end = address + size;
dir = pgd_offset(mm, address);
+ flush_cache_range(mm, end - size, end);
while (address < end) {
zap_pmd_range(dir, address, end - address);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
- invalidate_range(mm, end - size, end);
+ flush_tlb_range(mm, end - size, end);
return 0;
}
@@ -428,6 +434,7 @@
zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE, prot));
dir = pgd_offset(current->mm, address);
+ flush_cache_range(current->mm, beg, end);
while (address < end) {
pmd_t *pmd = pmd_alloc(dir, address);
error = -ENOMEM;
@@ -439,7 +446,7 @@
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
- invalidate_range(current->mm, beg, end);
+ flush_tlb_range(current->mm, beg, end);
return error;
}
@@ -499,6 +506,7 @@
offset -= from;
dir = pgd_offset(current->mm, from);
+ flush_cache_range(current->mm, beg, from);
while (from < end) {
pmd_t *pmd = pmd_alloc(dir, from);
error = -ENOMEM;
@@ -510,7 +518,7 @@
from = (from + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
- invalidate_range(current->mm, beg, from);
+ flush_tlb_range(current->mm, beg, from);
return error;
}
@@ -619,19 +627,24 @@
if (mem_map[MAP_NR(old_page)].reserved)
++vma->vm_mm->rss;
copy_page(old_page,new_page);
+ flush_page_to_ram(old_page);
+ flush_page_to_ram(new_page);
+ flush_cache_page(vma, address);
set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
free_page(old_page);
- invalidate_page(vma, address);
+ flush_tlb_page(vma, address);
return;
}
+ flush_cache_page(vma, address);
set_pte(page_table, BAD_PAGE);
+ flush_tlb_page(vma, address);
free_page(old_page);
oom(tsk);
- invalidate_page(vma, address);
return;
}
+ flush_cache_page(vma, address);
set_pte(page_table, pte_mkdirty(pte_mkwrite(pte)));
- invalidate_page(vma, address);
+ flush_tlb_page(vma, address);
if (new_page)
free_page(new_page);
return;
@@ -901,6 +914,7 @@
}
address &= PAGE_MASK;
if (!vma->vm_ops || !vma->vm_ops->nopage) {
+ flush_cache_page(vma, address);
get_empty_page(tsk, vma, page_table, write_access);
return;
}
@@ -914,7 +928,9 @@
page = vma->vm_ops->nopage(vma, address, write_access && !(vma->vm_flags & VM_SHARED));
if (!page) {
send_sig(SIGBUS, current, 1);
+ flush_cache_page(vma, address);
put_page(page_table, BAD_PAGE);
+ flush_tlb_page(vma, address);
return;
}
/*
@@ -932,7 +948,9 @@
entry = pte_mkwrite(pte_mkdirty(entry));
} else if (mem_map[MAP_NR(page)].count > 1 && !(vma->vm_flags & VM_SHARED))
entry = pte_wrprotect(entry);
+ flush_cache_page(vma, address);
put_page(page_table, entry);
+ flush_tlb_page(vma, address);
}
/*
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov (this page was generated with Sam's original version of the patch-browsing script)