assorted dead code removal around asm/pgtable.h
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQQqUNBr3gm4hGXdBJlZ7Krx/gZQ6wUCaNh9cwAKCRBZ7Krx/gZQ
68SbAP9aRaNQf4whe9uxuR+FPbo8AM2rSKEj39w47bNl+FJhsAD8CQWm3D6cQhXM
1ecJI51uGBeBB4vaq+/7qT+j64Nxhwc=
=7abW
-----END PGP SIGNATURE-----

Merge tag 'pull-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull misc non-vfs updates from Al Viro:
 "Assorted dead code removal around asm/pgtable.h"

* tag 'pull-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  alpha: unobfuscate _PAGE_P() definition
  kill FIRST_USER_PGD_NR
  alpha: get rid of the remnants of BAD_PAGE and friends
  SET_PAGE_DIR() users had been gone since 2.3.12pre1
  PAGE_PTR() had been last used outside of arch/* in 1.1.94
  csky: remove BS check for FAULT_FLAG_ALLOW_RETRY
commit 9b0d551bcc
@@ -107,7 +107,7 @@ struct vm_area_struct;
 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
 
-#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
+#define _PAGE_P(x) _PAGE_NORMAL((x) | _PAGE_FOW)
 #define _PAGE_S(x) _PAGE_NORMAL(x)
 
 /*
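For context on the "unobfuscate" commit: the old _PAGE_P() expression ORs _PAGE_FOW in only when the bit is not already set, which is the same thing as ORing it in unconditionally. A minimal stand-alone sketch checking the equivalence (the _PAGE_FOW value below is made up for the demo; the real constant lives in the alpha headers):

#include <assert.h>

#define _PAGE_FOW 0x4UL	/* hypothetical bit value, for illustration only */

/* old formulation: add FOW only when it is not already present */
#define PAGE_P_OLD(x) ((x) | (((x) & _PAGE_FOW) ? 0 : _PAGE_FOW))
/* new formulation: just OR the bit in */
#define PAGE_P_NEW(x) ((x) | _PAGE_FOW)

int main(void)
{
	for (unsigned long x = 0; x < 64; x++)
		assert(PAGE_P_OLD(x) == PAGE_P_NEW(x));
	return 0;
}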
@@ -126,34 +126,11 @@ struct vm_area_struct;
 #define pgprot_noncached(prot) (prot)
 
 /*
- * BAD_PAGETABLE is used when we need a bogus page-table, while
- * BAD_PAGE is used for a bogus page.
- *
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern pte_t __bad_page(void);
-extern pmd_t * __bad_pagetable(void);
-
 extern unsigned long __zero_page(void);
-
-#define BAD_PAGETABLE __bad_pagetable()
-#define BAD_PAGE __bad_page()
 #define ZERO_PAGE(vaddr) (virt_to_page(ZERO_PGE))
-
-/* number of bits that fit into a memory pointer */
-#define BITS_PER_PTR (8*sizeof(unsigned long))
-
-/* to align the pointer to a pointer address */
-#define PTR_MASK (~(sizeof(void*)-1))
-
-/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
-#define SIZEOF_PTR_LOG2 3
-
-/* to find an entry in a page-table */
-#define PAGE_PTR(address) \
-((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
 
 /*
  * On certain platforms whose physical address space can overlap KSEG,
  * namely EV6 and above, we must re-twiddle the physaddr to restore the
@@ -60,33 +60,6 @@ pgd_alloc(struct mm_struct *mm)
 }
 
 
-/*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving an inode
- * unused etc..
- *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-pmd_t *
-__bad_pagetable(void)
-{
-	memset(absolute_pointer(EMPTY_PGT), 0, PAGE_SIZE);
-	return (pmd_t *) EMPTY_PGT;
-}
-
-pte_t
-__bad_page(void)
-{
-	memset(absolute_pointer(EMPTY_PGE), 0, PAGE_SIZE);
-	return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
-}
-
 static inline unsigned long
 load_PCB(struct pcb_struct *pcb)
 {
@@ -277,7 +277,7 @@ retry:
 	if (fault & VM_FAULT_COMPLETED)
 		return;
 
-	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
+	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
 
 		/*
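The csky change relies on the invariant that the generic fault path only reports VM_FAULT_RETRY when the caller allowed retries in the first place, so re-testing FAULT_FLAG_ALLOW_RETRY on the return path never changes the outcome. A toy user-space model of that invariant (flag values and the fake handler below are stand-ins, not the real kernel definitions):

#include <assert.h>

#define FAULT_FLAG_ALLOW_RETRY	0x1
#define VM_FAULT_RETRY		0x2

/* model: retry is reported only if the caller allowed it */
static unsigned int fake_handle_mm_fault(unsigned int flags, int need_retry)
{
	return (need_retry && (flags & FAULT_FLAG_ALLOW_RETRY)) ? VM_FAULT_RETRY : 0;
}

int main(void)
{
	for (unsigned int flags = 0; flags <= 1; flags++)
		for (int need_retry = 0; need_retry <= 1; need_retry++) {
			unsigned int fault = fake_handle_mm_fault(flags, need_retry);
			int old_check = (fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY);
			int new_check = (fault & VM_FAULT_RETRY) != 0;
			/* the extra ALLOW_RETRY test never changes the decision */
			assert(old_check == new_check);
		}
	return 0;
}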
@@ -119,16 +119,6 @@ extern void *empty_zero_page;
  */
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
-/* number of bits that fit into a memory pointer */
-#define BITS_PER_PTR (8*sizeof(unsigned long))
-
-/* to align the pointer to a pointer address */
-#define PTR_MASK (~(sizeof(void*)-1))
-
-/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
-/* 64-bit machines, beware! SRB. */
-#define SIZEOF_PTR_LOG2 2
-
 extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);
 
 /*
@@ -99,7 +99,6 @@ extern pte_t *va_to_pte(unsigned long address);
 #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
 
 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_PGD_NR 0
 
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
|
@ -183,23 +183,6 @@ extern void paging_init(void);
|
|||
extern unsigned long empty_zero_page[2048];
|
||||
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
|
||||
|
||||
/* number of bits that fit into a memory pointer */
|
||||
#define BITS_PER_PTR (8*sizeof(unsigned long))
|
||||
|
||||
/* to align the pointer to a pointer address */
|
||||
#define PTR_MASK (~(sizeof(void *)-1))
|
||||
|
||||
/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
|
||||
/* 64-bit machines, beware! SRB. */
|
||||
#define SIZEOF_PTR_LOG2 2
|
||||
|
||||
/* to find an entry in a page-table */
|
||||
#define PAGE_PTR(address) \
|
||||
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
|
||||
|
||||
/* to set the page-dir */
|
||||
#define SET_PAGE_DIR(tsk, pgdir)
|
||||
|
||||
#define pte_none(x) (!pte_val(x))
|
||||
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
|
||||
#define pte_clear(mm, addr, xp) do { pte_val(*(xp)) = 0; } while (0)
|
||||
|
|
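For reference, the PAGE_PTR() macro being deleted above (unused outside arch/* since 1.1.94, per the commit list) turned a virtual address into the byte offset of its page-table entry within a one-page table. A minimal sketch of what it computed, assuming 4 KiB pages and the 32-bit SIZEOF_PTR_LOG2 == 2 flavour shown in these hunks:

#include <assert.h>

/* Constants mimic the removed 32-bit definitions; on a 64-bit host
 * sizeof(void *) differs, so the example sticks to values where that
 * does not change the result.
 */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define SIZEOF_PTR_LOG2	2
#define PTR_MASK	(~(sizeof(void *) - 1))

/* the removed macro: byte offset of an address's page-table entry
 * inside a one-page table */
#define PAGE_PTR(address) \
	((unsigned long)(address) >> (PAGE_SHIFT - SIZEOF_PTR_LOG2) & PTR_MASK & ~PAGE_MASK)

int main(void)
{
	/* virtual page number 6 -> table entry 6 -> byte offset 6 * 4 == 24 */
	assert(PAGE_PTR(6UL << PAGE_SHIFT) == 24);
	/* the offset within the page does not affect the result */
	assert(PAGE_PTR((6UL << PAGE_SHIFT) + 123) == 24);
	return 0;
}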
|
@ -58,7 +58,6 @@
|
|||
#define PTRS_PER_PTE_SHIFT 10
|
||||
#define PTRS_PER_PGD 1024
|
||||
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
|
||||
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
/*
|
||||
|
|