|
19 | 19 | #include <asm/x86_init.h> |
20 | 20 |
|
/* Dump the kernel page tables to a seq_file (debugfs interface). */
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
/* Walk the kernel page tables and warn about any W+X (writable+executable) mappings. */
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
/* Compiles away when CONFIG_DEBUG_WX is off; do/while(0) keeps it statement-safe. */
#define debug_checkwx() do { } while (0)
#endif
22 | 29 |
|
23 | 30 | /* |
24 | 31 | * ZERO_PAGE is a global shared page that is always zero: used |
@@ -142,12 +149,12 @@ static inline unsigned long pte_pfn(pte_t pte) |
142 | 149 |
|
143 | 150 | static inline unsigned long pmd_pfn(pmd_t pmd) |
144 | 151 | { |
145 | | - return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; |
| 152 | + return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; |
146 | 153 | } |
147 | 154 |
|
148 | 155 | static inline unsigned long pud_pfn(pud_t pud) |
149 | 156 | { |
150 | | - return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT; |
| 157 | + return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT; |
151 | 158 | } |
152 | 159 |
|
153 | 160 | #define pte_page(pte) pfn_to_page(pte_pfn(pte)) |
@@ -379,7 +386,9 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) |
379 | 386 | return __pgprot(preservebits | addbits); |
380 | 387 | } |
381 | 388 |
|
/* Extract an entry's protection/flag bits as a pgprot_t, per page-table level. */
#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
383 | 392 |
|
384 | 393 | #define canon_pgprot(p) __pgprot(massage_pgprot(p)) |
385 | 394 |
|
@@ -502,14 +511,15 @@ static inline int pmd_none(pmd_t pmd) |
502 | 511 |
|
503 | 512 | static inline unsigned long pmd_page_vaddr(pmd_t pmd) |
504 | 513 | { |
505 | | - return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK); |
| 514 | + return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd)); |
506 | 515 | } |
507 | 516 |
|
508 | 517 | /* |
509 | 518 | * Currently stuck as a macro due to indirect forward reference to |
510 | 519 | * linux/mmzone.h's __section_mem_map_addr() definition: |
511 | 520 | */ |
/* struct page for the page a PMD entry maps; mask strips non-address bits. */
#define pmd_page(pmd) \
	pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)
513 | 523 |
|
514 | 524 | /* |
515 | 525 | * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] |
@@ -570,14 +580,15 @@ static inline int pud_present(pud_t pud) |
570 | 580 |
|
571 | 581 | static inline unsigned long pud_page_vaddr(pud_t pud) |
572 | 582 | { |
573 | | - return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK); |
| 583 | + return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud)); |
574 | 584 | } |
575 | 585 |
|
576 | 586 | /* |
577 | 587 | * Currently stuck as a macro due to indirect forward reference to |
578 | 588 | * linux/mmzone.h's __section_mem_map_addr() definition: |
579 | 589 | */ |
/* struct page for the page a PUD entry maps; mask strips non-address bits. */
#define pud_page(pud) \
	pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)
581 | 592 |
|
582 | 593 | /* Find an entry in the second-level page table.. */ |
583 | 594 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) |
|
0 commit comments