Skip to content

Commit 7ce96f0

Browse files
xxkentabrodkin
authored and committed
ARC: Proper kernel mapping
Use proper page flags for different memory sections; for example, the X (execute) flag is used only for the text section. Previously we mapped the kernel as one big chunk. Now the kernel mapping is page-based, so it requires memblock to be initialized in order to allocate page table entries dynamically.
1 parent 6b80fb7 commit 7ce96f0

File tree

5 files changed

+141
-71
lines changed

5 files changed

+141
-71
lines changed

arch/arc/include/asm/pgtable-bits-arcv3.h

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,8 @@
7373
/* TBD: kernel is RWX by default, split it to code/data */
7474
#define _PAGE_KERNEL (_PAGE_TABLE | \
7575
/* writable */ \
76-
_PAGE_NOTEXEC_U | /* exec k */ \
76+
_PAGE_NOTEXEC_U | \
77+
_PAGE_NOTEXEC_K | \
7778
/* AP kernel only | global */ \
7879
_PAGE_ACCESSED | \
7980
_PAGE_SHARED_INNER | \
@@ -82,7 +83,12 @@
8283
#define PAGE_NONE __pgprot(_PAGE_BASE) /* TBD */
8384
#define PAGE_TABLE __pgprot(_PAGE_TABLE)
8485
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
85-
#define PAGE_KERNEL_BLK __pgprot(_PAGE_KERNEL & ~_PAGE_LINK)
86+
#define PAGE_KERNEL_RW __pgprot(_PAGE_KERNEL)
87+
#define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL & ~_PAGE_NOTEXEC_K)
88+
89+
#define PAGE_KERNEL_BLK __pgprot(_PAGE_KERNEL & ~_PAGE_LINK)
90+
#define PAGE_KERNEL_BLK_RW __pgprot(_PAGE_KERNEL & ~_PAGE_LINK)
91+
#define PAGE_KERNEL_BLK_RWX __pgprot(_PAGE_KERNEL & ~_PAGE_NOTEXEC_K & ~_PAGE_LINK)
8692

8793
#define PAGE_R __pgprot(_PAGE_BASE)
8894
#define PAGE_RW __pgprot(_PAGE_RW)

arch/arc/kernel/head.S

Lines changed: 11 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -91,14 +91,12 @@
9191
; All registers are clobbered.
9292
.macro BUILD_PAGE_TABLE, link_addr, phy_addr, link_end, pgd, pud, pmd, cur, tmp, val, foo
9393

94-
; FIXME: we need asserts about kernel size <= allocated size
95-
; FIXME: we need a better way to generalize this
9694
#if defined(CONFIG_ARC_MMU_V6_48) && defined(CONFIG_ARC_PAGE_SIZE_4K)
9795

9896
; Three-level page table, PGD + PUD + PMD
99-
#define BUILD_PGD_ENTRY BUILD_PAGE_TABLE_ENTRY \pgd, \link_addr, \pud, \cur, \tmp, PTRS_PER_PGD, PGDIR_SHIFT, PAGE_KERNEL
100-
#define BUILD_PUD_ENTRY BUILD_PAGE_TABLE_ENTRY \pud, \link_addr, \pmd, \cur, \tmp, PTRS_PER_PUD, PUD_SHIFT, PAGE_KERNEL
101-
#define BUILD_PMD_ENTRY BUILD_PAGE_TABLE_ENTRY \pmd, \link_addr, \phy_addr, \cur, \tmp, PTRS_PER_PMD, PMD_SHIFT, PAGE_KERNEL_BLK
97+
#define BUILD_PGD_ENTRY BUILD_PAGE_TABLE_ENTRY \pgd, \link_addr, \pud, \cur, \tmp, PTRS_PER_PGD, PGDIR_SHIFT, PAGE_KERNEL_RWX
98+
#define BUILD_PUD_ENTRY BUILD_PAGE_TABLE_ENTRY \pud, \link_addr, \pmd, \cur, \tmp, PTRS_PER_PUD, PUD_SHIFT, PAGE_KERNEL_RWX
99+
#define BUILD_PMD_ENTRY BUILD_PAGE_TABLE_ENTRY \pmd, \link_addr, \phy_addr, \cur, \tmp, PTRS_PER_PMD, PMD_SHIFT, PAGE_KERNEL_BLK_RWX
102100
#define ENTRY_SIZE PMD_SIZE
103101
#define ENTRY_LABEL .Lbuild_pmd_entry\@
104102
#define NEXT_SIZE PUD_SIZE
@@ -107,9 +105,9 @@
107105
#elif defined(CONFIG_ARC_MMU_V6_48) && defined(CONFIG_ARC_PAGE_SIZE_16K)
108106

109107
; Three-level page table, PGD + PUD + PMD
110-
#define BUILD_PGD_ENTRY BUILD_PAGE_TABLE_ENTRY \pgd, \link_addr, \pud, \cur, \tmp, PTRS_PER_PGD, PGDIR_SHIFT, PAGE_KERNEL
111-
#define BUILD_PUD_ENTRY BUILD_PAGE_TABLE_ENTRY \pud, \link_addr, \pmd, \cur, \tmp, PTRS_PER_PUD, PUD_SHIFT, PAGE_KERNEL
112-
#define BUILD_PMD_ENTRY BUILD_PAGE_TABLE_ENTRY \pmd, \link_addr, \phy_addr, \cur, \tmp, PTRS_PER_PMD, PMD_SHIFT, PAGE_KERNEL_BLK
108+
#define BUILD_PGD_ENTRY BUILD_PAGE_TABLE_ENTRY \pgd, \link_addr, \pud, \cur, \tmp, PTRS_PER_PGD, PGDIR_SHIFT, PAGE_KERNEL_RWX
109+
#define BUILD_PUD_ENTRY BUILD_PAGE_TABLE_ENTRY \pud, \link_addr, \pmd, \cur, \tmp, PTRS_PER_PUD, PUD_SHIFT, PAGE_KERNEL_RWX
110+
#define BUILD_PMD_ENTRY BUILD_PAGE_TABLE_ENTRY \pmd, \link_addr, \phy_addr, \cur, \tmp, PTRS_PER_PMD, PMD_SHIFT, PAGE_KERNEL_BLK_RWX
113111
#define ENTRY_SIZE PMD_SIZE
114112
#define ENTRY_LABEL .Lbuild_pmd_entry\@
115113
#define NEXT_SIZE PUD_SIZE
@@ -118,17 +116,17 @@
118116
#elif defined(CONFIG_ARC_MMU_V6_48) && defined(CONFIG_ARC_PAGE_SIZE_64K) || defined(CONFIG_ARC_MMU_V6_52) || defined(CONFIG_ARC_MMU_V6_32)
119117

120118
; Two-level page table, PGD + PMD
121-
#define BUILD_PGD_ENTRY BUILD_PAGE_TABLE_ENTRY \pgd, \link_addr, \pmd, \cur, \tmp, PTRS_PER_PGD, PGDIR_SHIFT, PAGE_KERNEL
119+
#define BUILD_PGD_ENTRY BUILD_PAGE_TABLE_ENTRY \pgd, \link_addr, \pmd, \cur, \tmp, PTRS_PER_PGD, PGDIR_SHIFT, PAGE_KERNEL_RWX
122120
#define BUILD_PUD_ENTRY
123-
#define BUILD_PMD_ENTRY BUILD_PAGE_TABLE_ENTRY \pmd, \link_addr, \phy_addr, \cur, \tmp, PTRS_PER_PMD, PMD_SHIFT, PAGE_KERNEL_BLK
121+
#define BUILD_PMD_ENTRY BUILD_PAGE_TABLE_ENTRY \pmd, \link_addr, \phy_addr, \cur, \tmp, PTRS_PER_PMD, PMD_SHIFT, PAGE_KERNEL_BLK_RWX
124122
#define ENTRY_SIZE PMD_SIZE
125123
#define ENTRY_LABEL .Lbuild_pmd_entry\@
126124
#define NEXT_SIZE PGDIR_SIZE
127125
#define ENTRY_LABEL_NEXT .Lbuild_pgd_entry\@
128126

129127
#endif
130128

131-
ADDR \val, \link_addr, NEXT_SIZE
129+
ADDR \val, \link_addr, (NEXT_SIZE-1)
132130
MOVR \foo, \pmd
133131

134132
.Lbuild_pgd_entry\@:
@@ -148,13 +146,13 @@
148146
CMPR \link_addr, \link_end
149147
bge .Lbuild_end\@
150148
CMPR \link_addr, \val
151-
bge.d .Linc\@
149+
bgt.d .Linc\@
152150
ADDR \foo, \foo, 0x8
153151
b ENTRY_LABEL
154152

155153
.Linc\@:
156154
MOVR \pmd, \foo
157-
ADDR \val, \foo, NEXT_SIZE
155+
ADDR \val, \link_addr, (NEXT_SIZE-1)
158156
b ENTRY_LABEL_NEXT
159157

160158
.Lbuild_end\@:

arch/arc/kernel/setup.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -499,8 +499,8 @@ void setup_processor(void)
499499
arc_chk_core_config(&info);
500500

501501
arc_init_IRQ();
502-
/* No ned to setup MMU for secondary CPU for ARCv3. */
503-
if (!IS_ENABLED(CONFIG_ISA_ARCV3) || c == 0)
502+
/* ARCv3 MMU must be initialized after setup_arch_memory. */
503+
if (!IS_ENABLED(CONFIG_ISA_ARCV3))
504504
arc_mmu_init();
505505
arc_cache_init();
506506
}
@@ -603,6 +603,8 @@ void __init setup_arch(char **cmdline_p)
603603

604604
setup_processor();
605605
setup_arch_memory();
606+
if (IS_ENABLED(CONFIG_ISA_ARCV3))
607+
arc_mmu_init();
606608

607609
/* copy flat DT out of .init and then unflatten it */
608610
unflatten_and_copy_device_tree();

arch/arc/kernel/vmlinux.lds.S

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -56,15 +56,12 @@ SECTIONS
5656
* decompress_inflate.c:gunzip( ) -> zlib_inflate_workspace( )
5757
*/
5858

59+
. = ALIGN(PAGE_SIZE);
5960
__init_begin = .;
61+
__init_data_begin = .;
6062

6163
.init.ramfs : { INIT_RAM_FS }
6264

63-
. = ALIGN(PAGE_SIZE);
64-
65-
HEAD_TEXT_SECTION
66-
INIT_TEXT_SECTION(L1_CACHE_BYTES)
67-
6865
/* INIT_DATA_SECTION open-coded: special INIT_RAM_FS handling */
6966
.init.data : {
7067
INIT_DATA
@@ -82,6 +79,14 @@ SECTIONS
8279
PERCPU_SECTION(L1_CACHE_BYTES)
8380

8481
. = ALIGN(PAGE_SIZE);
82+
__init_data_end = .;
83+
__init_text_begin = .;
84+
85+
HEAD_TEXT_SECTION
86+
INIT_TEXT_SECTION(L1_CACHE_BYTES)
87+
88+
. = ALIGN(PAGE_SIZE);
89+
__init_text_end = .;
8590
__init_end = .;
8691

8792
.text : {
@@ -98,6 +103,7 @@ SECTIONS
98103
*(.gnu.warning)
99104
}
100105
EXCEPTION_TABLE(L1_CACHE_BYTES)
106+
. = ALIGN(PAGE_SIZE);
101107
_etext = .;
102108

103109
_sdata = .;

arch/arc/mm/tlb-arcv3.c

Lines changed: 107 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -2,12 +2,14 @@
22

33
#include <linux/mm.h>
44
#include <linux/types.h>
5+
#include <linux/memblock.h>
56

67
#include <asm/arcregs.h>
78
#include <asm/mmu_context.h>
89
#include <asm/mmu.h>
910
#include <asm/setup.h>
1011
#include <asm/fixmap.h>
12+
#include <asm/pgalloc.h>
1113

1214
/* A copy of the ASID from the PID reg is kept in asid_cache */
1315
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
@@ -230,89 +232,147 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
230232
set_pte(pte, pfn_pte(PFN_DOWN(phys), prot));
231233
}
232234

233-
/*
234-
* Map the kernel code/data into page tables for a given @mm
235-
*
236-
* Assumes
237-
* - pgd, pud and pmd are already allocated
238-
* - pud is wired up to pgd and pmd to pud
239-
*
240-
* TODO: assumes 4 levels, implement properly using p*d_addr_end loops
241-
*/
242-
int arc_map_kernel_in_mm(struct mm_struct *mm)
235+
static phys_addr_t __init arc_map_early_alloc_page(void)
243236
{
244-
unsigned long addr = PAGE_OFFSET;
245-
unsigned long end = PAGE_OFFSET + CONFIG_LINUX_MAP_SIZE;
237+
phys_addr_t phys;
238+
239+
/* At early stage we have mapped PAGE_OFFSET + CONFIG_LINUX_MAP_SIZE */
240+
phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
241+
__pa(PAGE_OFFSET + CONFIG_LINUX_MAP_SIZE));
242+
243+
memset(__va(phys), 0, PAGE_SIZE);
244+
245+
return phys;
246+
}
247+
248+
static int __init arc_map_segment_in_mm(struct mm_struct *mm,
249+
unsigned long start,
250+
unsigned long end,
251+
pgprot_t prot)
252+
{
253+
unsigned long addr;
254+
phys_addr_t phys;
246255
pgd_t *pgd;
247256
p4d_t *p4d;
248257
pud_t *pud;
249258
pmd_t *pmd;
259+
pte_t *pte;
250260

251-
do {
252-
pgprot_t prot = PAGE_KERNEL_BLK;
261+
BUG_ON(start & (PAGE_SIZE-1));
262+
BUG_ON(end & (PAGE_SIZE-1));
253263

264+
/* TODO: Use bigger blocks if possible */
265+
addr = start;
266+
do {
254267
pgd = pgd_offset(mm, addr);
255-
if (pgd_none(*pgd) || !pgd_present(*pgd))
268+
if (pgd_none(*pgd)){
256269
return 1;
270+
}
257271

258272
p4d = p4d_offset(pgd, addr);
259-
if (p4d_none(*p4d) || !p4d_present(*p4d))
260-
return 1;
273+
if (p4d_none(*p4d)) {
274+
phys = arc_map_early_alloc_page();
275+
p4d_populate(mm, p4d, __va(phys));
276+
}
261277

262278
pud = pud_offset(p4d, addr);
263-
if (pud_none(*pud) || !pud_present(*pud))
264-
return 1;
279+
if (pud_none(*pud)) {
280+
phys = arc_map_early_alloc_page();
281+
pud_populate(mm, pud, __va(phys));
282+
}
265283

266284
pmd = pmd_offset(pud, addr);
267-
if (!pmd_none(*pmd) || pmd_present(*pmd))
268-
return 1;
285+
if (pmd_none(*pmd)) {
286+
phys = arc_map_early_alloc_page();
287+
pmd_populate_kernel(mm, pmd, __va(phys));
288+
}
269289

270-
set_pmd(pmd, pfn_pmd(virt_to_pfn(addr), prot));
271-
addr = pmd_addr_end(addr, end);
272-
}
273-
while (addr != end);
290+
pte = pte_offset_kernel(pmd, addr);
291+
292+
set_pte(pte, pfn_pte(virt_to_pfn(addr), prot));
293+
294+
addr += PAGE_SIZE;
295+
} while (addr < end);
274296

275297
return 0;
276298
}
277299

278-
void arc_paging_init(void)
300+
/*
301+
* Map the kernel code/data into page tables for a given @mm
302+
*/
303+
int __init arc_map_kernel_in_mm(struct mm_struct *mm)
279304
{
280-
#if CONFIG_PGTABLE_LEVELS == 4
281-
unsigned int idx;
305+
extern char __init_text_begin[];
306+
extern char __init_data_begin[];
307+
extern char __init_text_end[];
308+
extern char __init_data_end[];
309+
310+
arc_map_segment_in_mm(mm,
311+
PAGE_OFFSET,
312+
(unsigned long) __init_data_begin,
313+
PAGE_KERNEL_RWX);
314+
arc_map_segment_in_mm(mm,
315+
(unsigned long) __init_data_begin,
316+
(unsigned long) __init_data_end,
317+
PAGE_KERNEL_RW);
318+
arc_map_segment_in_mm(mm,
319+
(unsigned long) __init_text_begin,
320+
(unsigned long) __init_text_end,
321+
PAGE_KERNEL_RWX);
322+
arc_map_segment_in_mm(mm,
323+
(unsigned long) _stext,
324+
(unsigned long) _etext,
325+
PAGE_KERNEL_RWX);
326+
arc_map_segment_in_mm(mm,
327+
(unsigned long) _sdata,
328+
(unsigned long) _end,
329+
PAGE_KERNEL_RW);
282330

283-
idx = pgd_index(PAGE_OFFSET);
284-
swapper_pg_dir[idx] = pfn_pgd(virt_to_pfn(swapper_pud), PAGE_TABLE);
285-
ptw_flush(&swapper_pg_dir[idx]);
331+
return 0;
332+
}
286333

287-
idx = pud_index(PAGE_OFFSET);
288-
swapper_pud[idx] = pfn_pud(virt_to_pfn(swapper_pmd), PAGE_TABLE);
289-
ptw_flush(&swapper_pud[idx]);
334+
int __init arc_map_memory_in_mm(struct mm_struct *mm)
335+
{
336+
u64 i;
337+
phys_addr_t start, end;
290338

291-
idx = pud_index(PAGE_OFFSET + 0x40000000);
292-
swapper_pud[idx] = pfn_pud(virt_to_pfn(&swapper_pmd[PTRS_PER_PMD]), PAGE_TABLE);
293-
ptw_flush(&swapper_pud[idx]);
339+
/*
340+
* Kernel (__pa(PAGE_OFFSET) to __pa(_end) is already mapped by
341+
* arc_map_kernel_in_mm(), so map only >= __pa(_end).
342+
*
343+
* We expect that kernel is mapped to the start of physical memory,
344+
* so start >= __pa(PAGE_OFFSET).
345+
*/
346+
for_each_mem_range(i, &start, &end) {
347+
if (start >= end)
348+
break;
294349

295-
#elif CONFIG_PGTABLE_LEVELS == 3
296-
unsigned int idx;
350+
if (end <= __pa(_end))
351+
continue;
297352

298-
idx = pgd_index(PAGE_OFFSET);
299-
swapper_pg_dir[idx] = pfn_pgd(virt_to_pfn(swapper_pmd), PAGE_TABLE);
300-
ptw_flush(&swapper_pg_dir[idx]);
353+
if (start < __pa(_end))
354+
start = __pa(_end);
301355

302-
idx = pgd_index(PAGE_OFFSET + 0x40000000);
303-
swapper_pg_dir[idx] = pfn_pgd(virt_to_pfn(&swapper_pmd[PTRS_PER_PMD]), PAGE_TABLE);
304-
ptw_flush(&swapper_pg_dir[idx]);
356+
arc_map_segment_in_mm(mm,
357+
(unsigned long)__va(start),
358+
(unsigned long)__va(end),
359+
PAGE_KERNEL_RW);
360+
}
305361

306-
#endif
362+
return 0;
363+
}
307364

365+
void __init arc_paging_init(void)
366+
{
308367
arc_map_kernel_in_mm(&init_mm);
368+
arc_map_memory_in_mm(&init_mm);
309369

310370
arc_mmu_rtp_set(0, 0, 0);
311371
arc_mmu_rtp_set(1, __pa(swapper_pg_dir), 0);
312372
local_flush_tlb_all();
313373
}
314374

315-
void arc_mmu_init(void)
375+
void __init arc_mmu_init(void)
316376
{
317377
u64 memattr;
318378

@@ -329,8 +389,6 @@ void arc_mmu_init(void)
329389
if ((unsigned long)_end - PAGE_OFFSET > PUD_SIZE)
330390
panic("kernel doesn't fit in PUD (%lu Mb)\n", TO_MB(PUD_SIZE));
331391

332-
write_aux_reg(ARC_REG_MMU_TTBC, MMU_TTBC);
333-
334392
memattr = MEMATTR_NORMAL << (MEMATTR_IDX_NORMAL * 8);
335393
memattr |= MEMATTR_UNCACHED << (MEMATTR_IDX_UNCACHED * 8);
336394
memattr |= MEMATTR_VOLATILE << (MEMATTR_IDX_VOLATILE * 8);

0 commit comments

Comments
 (0)