
Commit 43000f0

xxkent authored and pavelvkozlov committed
ARCv3: Add ability to change size of the mapped kernel address space
This update adds the ability to change the mapped kernel address space from 1 GB up to 2 GB. Tested for HS5x/HS6x with MMUv6_32/MMUv6_48 and page size = 4K.
1 parent b3bebda commit 43000f0
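Editor's note for orientation, not part of the commit: with 4K pages each PMD table block-maps 1 GB (512 entries of 2 MB each), so any map size above 1 GB needs a second PMD table and a second top-level entry — which is exactly what the arch/arc/mm/init.c and arch/arc/mm/tlb-arcv3.c hunks below provide. A minimal C sketch of that arithmetic, with the 4K-page geometry stated as an assumption:

/*
 * Editor's sketch, not from the commit: how many PMD tables (and top-level
 * PGD/PUD slots) a given LINUX_MAP_SIZE needs, assuming the usual 4K-page
 * geometry: 512-entry tables, 2 MB blocks at the PMD level.
 */
#include <stdio.h>

#define PMD_BLOCK       (2UL << 20)                 /* 2 MB per PMD entry (assumed) */
#define PMD_ENTRIES     512UL                       /* PTRS_PER_PMD for 4K pages (assumed) */
#define PMD_TABLE_SPAN  (PMD_BLOCK * PMD_ENTRIES)   /* 1 GB covered by one PMD table */

int main(void)
{
        unsigned long map_sizes[] = { 0x40000000UL /* default */, 0x80000000UL /* new max */ };

        for (int i = 0; i < 2; i++) {
                unsigned long n = (map_sizes[i] + PMD_TABLE_SPAN - 1) / PMD_TABLE_SPAN;

                printf("LINUX_MAP_SIZE=0x%lx -> %lu PMD table(s), %lu top-level slot(s)\n",
                       map_sizes[i], n, n);
        }
        return 0;
}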

File tree

5 files changed: +76 / -47 lines


arch/arc/Kconfig

Lines changed: 14 additions & 0 deletions
@@ -582,6 +582,20 @@ config LINUX_RAM_BASE
       cases (such as HSDK), Linux can't be linked at start of DDR, hence
       this option.
       This needs to match memory start address specified in Device Tree
+      LINUX_RAM_BASE needs to match LINUX_RAM_BASE in dts file.
+
+config LINUX_MAP_SIZE
+    hex "Map size"
+    default "0x40000000"
+    help
+      0x40000000 - 1Gb is the default size. You can change it to
+      0x80000000 - 2Gb to increase the kernel space size.
+      Don't set it to more than 0x6000000 for HS5x when ARC_KVADDR_SIZE==256.
+      Don't set it to more than 0x5000000 for HS5x when ARC_KVADDR_SIZE==512.
+      Don't set it to more than 0x80000000 for HS6x.
+      LINUX_MAP_SIZE needs to match LINUX_RAM_SIZE in dts file.
+      If you don't know what the above means, leave this setting at its default.
+      ARC_KVADDR_SIZE also constrains LINUX_MAP_SIZE.
 
 config HIGHMEM
     bool "High Memory Support"
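Editor's sketch of where the HS5x limits in this help text come from (not part of the commit): on MMU_V6_32 the new processor.h below places VMALLOC right after the linear map and the fixmap at 0xFF000000, so the mapped region plus the vmalloc area must fit underneath the fixmap. PAGE_OFFSET = 0x80000000 is an assumption here, not something stated in this diff.

/*
 * Editor's sketch, not from the patch: upper bound on LINUX_MAP_SIZE for
 * HS5x (MMU_V6_32), where VMALLOC_START = PAGE_OFFSET + LINUX_MAP_SIZE and
 * FIXADDR_START = 0xFF000000 (see the processor.h hunk below).
 * PAGE_OFFSET = 0x80000000 is assumed; it is not part of this diff.
 */
#include <stdio.h>

int main(void)
{
        unsigned long page_offset   = 0x80000000UL;  /* assumption */
        unsigned long fixaddr_start = 0xFF000000UL;  /* from the new processor.h */
        unsigned long kvaddr_mb[]   = { 256, 512 };  /* ARC_KVADDR_SIZE values from the help text */

        for (int i = 0; i < 2; i++) {
                unsigned long vmalloc_size = kvaddr_mb[i] << 20;

                printf("ARC_KVADDR_SIZE=%lu: LINUX_MAP_SIZE must stay <= 0x%lx\n",
                       kvaddr_mb[i], fixaddr_start - page_offset - vmalloc_size);
        }
        return 0;
}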

arch/arc/include/asm/processor.h

Lines changed: 3 additions & 15 deletions
@@ -90,17 +90,11 @@ extern unsigned long __get_wchan(struct task_struct *p);
 #define TASK_SIZE 0x60000000
 #define USER_KERNEL_GUTTER 0
 
-#define FIXADDR_START (PAGE_OFFSET + PUD_SIZE)
-
 #define VMALLOC_START (PAGE_OFFSET + 0x100000000000UL)
 #define VMALLOC_SIZE (CONFIG_ARC_KVADDR_SIZE << 20)
 #define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
 
-/*
- * How much memory to map before dtb parsing and paging init.
- * 1Gb is selected to fit in one PUD for any ARCv3 MMU configurations.
- */
-#define EARLY_MAP_SIZE (1 << 30)
+#define FIXADDR_START (PAGE_OFFSET + CONFIG_LINUX_MAP_SIZE)
 
 #elif defined(CONFIG_ARC_MMU_V6_32)
 /*
@@ -117,17 +111,11 @@
 #define TASK_SIZE 0x60000000
 #define USER_KERNEL_GUTTER 0
 
-#define VMALLOC_START (PAGE_OFFSET + PUD_SIZE)
+#define VMALLOC_START (PAGE_OFFSET + CONFIG_LINUX_MAP_SIZE)
 #define VMALLOC_SIZE (CONFIG_ARC_KVADDR_SIZE << 20)
 #define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
 
-#define FIXADDR_START (VMALLOC_START + VMALLOC_SIZE)
-
-/*
- * How much memory to map before dtb parsing and paging init.
- * 1Gb is selected to fit in one PUD for any ARCv3 MMU configurations.
- */
-#define EARLY_MAP_SIZE (1 << 30)
+#define FIXADDR_START (0xFF000000)
 
 #else
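With EARLY_MAP_SIZE gone, the fixmap and vmalloc windows are now derived from CONFIG_LINUX_MAP_SIZE. A hedged sketch of the sanity checks this layout implies (not in the patch; in practice such BUILD_BUG_ONs would sit inside an existing init function, e.g. early_fixmap_init() in tlb-arcv3.c below, which already uses FIXADDR_SIZE):

/* Editor's sketch only -- the invariants behind the new defines. */
#if defined(CONFIG_ARC_MMU_V6_48)
        /* fixmap starts right after the mapped region and must end below vmalloc */
        BUILD_BUG_ON(FIXADDR_START + FIXADDR_SIZE > VMALLOC_START);
#elif defined(CONFIG_ARC_MMU_V6_32)
        /* vmalloc starts right after the mapped region and must end below the fixmap */
        BUILD_BUG_ON(VMALLOC_END > FIXADDR_START);
#endif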

arch/arc/kernel/head.S

Lines changed: 32 additions & 12 deletions
@@ -89,18 +89,20 @@
 ;
 ; Out:
 ; All registers are clobbered.
-.macro BUILD_PAGE_TABLE, link_addr, phy_addr, link_end, pgd, pud, pmd, cur, tmp
+.macro BUILD_PAGE_TABLE, link_addr, phy_addr, link_end, pgd, pud, pmd, cur, tmp, val, foo
 
 ; FIXME: we need asserts about kernel size <= allocated size
 ; FIXME: we need a better way to generalize this
 #if defined(CONFIG_ARC_MMU_V6_48) && defined(CONFIG_ARC_PAGE_SIZE_4K)
 
-; Two-level page table, PGD + PUD
+; Three-level page table, PGD + PUD + PMD
 #define BUILD_PGD_ENTRY BUILD_PAGE_TABLE_ENTRY \pgd, \link_addr, \pud, \cur, \tmp, PTRS_PER_PGD, PGDIR_SHIFT, PAGE_KERNEL
-#define BUILD_PUD_ENTRY BUILD_PAGE_TABLE_ENTRY \pud, \link_addr, \phy_addr, \cur, \tmp, PTRS_PER_PUD, PUD_SHIFT, PAGE_KERNEL_BLK
-#define BUILD_PMD_ENTRY
-#define ENTRY_SIZE PUD_SIZE
-#define ENTRY_LABEL .Lbuild_pud_entry\@
+#define BUILD_PUD_ENTRY BUILD_PAGE_TABLE_ENTRY \pud, \link_addr, \pmd, \cur, \tmp, PTRS_PER_PUD, PUD_SHIFT, PAGE_KERNEL
+#define BUILD_PMD_ENTRY BUILD_PAGE_TABLE_ENTRY \pmd, \link_addr, \phy_addr, \cur, \tmp, PTRS_PER_PMD, PMD_SHIFT, PAGE_KERNEL_BLK
+#define ENTRY_SIZE PMD_SIZE
+#define ENTRY_LABEL .Lbuild_pmd_entry\@
+#define NEXT_SIZE PUD_SIZE
+#define ENTRY_LABEL_NEXT .Lbuild_pud_entry\@
 
 #elif defined(CONFIG_ARC_MMU_V6_48) && defined(CONFIG_ARC_PAGE_SIZE_16K)
 
@@ -110,6 +112,8 @@
 #define BUILD_PMD_ENTRY BUILD_PAGE_TABLE_ENTRY \pmd, \link_addr, \phy_addr, \cur, \tmp, PTRS_PER_PMD, PMD_SHIFT, PAGE_KERNEL_BLK
 #define ENTRY_SIZE PMD_SIZE
 #define ENTRY_LABEL .Lbuild_pmd_entry\@
+#define NEXT_SIZE PUD_SIZE
+#define ENTRY_LABEL_NEXT .Lbuild_pud_entry\@
 
 #elif defined(CONFIG_ARC_MMU_V6_48) && defined(CONFIG_ARC_PAGE_SIZE_64K) || defined(CONFIG_ARC_MMU_V6_52) || defined(CONFIG_ARC_MMU_V6_32)
 
@@ -119,9 +123,14 @@
 #define BUILD_PMD_ENTRY BUILD_PAGE_TABLE_ENTRY \pmd, \link_addr, \phy_addr, \cur, \tmp, PTRS_PER_PMD, PMD_SHIFT, PAGE_KERNEL_BLK
 #define ENTRY_SIZE PMD_SIZE
 #define ENTRY_LABEL .Lbuild_pmd_entry\@
+#define NEXT_SIZE PGDIR_SIZE
+#define ENTRY_LABEL_NEXT .Lbuild_pgd_entry\@
 
 #endif
 
+        ADDR \val, \link_addr, NEXT_SIZE
+        MOVR \foo, \pmd
+
 .Lbuild_pgd_entry\@:
         BUILD_PGD_ENTRY
 
@@ -136,15 +145,26 @@
         ADDR \link_addr, \link_addr, ENTRY_SIZE
 
         ; All link addresses are in
-        CMPR \link_end, \link_addr
-        ble .Lbuild_end\@
+        CMPR \link_addr, \link_end
+        bge .Lbuild_end\@
+        CMPR \link_addr, \val
+        bge.d .Linc\@
+        ADDR \foo, \foo, 0x8
         b ENTRY_LABEL
+
+.Linc\@:
+        MOVR \pmd, \foo
+        ADDR \val, \foo, NEXT_SIZE
+        b ENTRY_LABEL_NEXT
+
 .Lbuild_end\@:
 #undef BUILD_PGD_ENTRY
 #undef BUILD_PUD_ENTRY
 #undef BUILD_PMD_ENTRY
 #undef ENTRY_SIZE
 #undef ENTRY_LABEL
+#undef NEXT_SIZE
+#undef ENTRY_LABEL_NEXT
 .endm
 
 ; Create page table with 1:1 mapping in given table pointers.
@@ -196,7 +216,7 @@
         SUBR r8, r8, r10
         ADDR r8, r8, r11
 
-        BUILD_PAGE_TABLE r3, r4, r5, r6, r7, r8, r9, r10
+        BUILD_PAGE_TABLE r3, r4, r5, r6, r7, r8, r9, r10, r11, r12
 .endm
 
 ; Create page table with virt to phy mapping in given addresses.
@@ -212,8 +232,8 @@
         MOVI r3, PAGE_OFFSET
         ; phy_addr <- CONFIG_LINUX_LINK_BASE
         MOVI r4, CONFIG_LINUX_LINK_BASE
-        ; link_end <- EARLY_MAP_SIZE , see processor.h
-        MOVI r5, (PAGE_OFFSET + EARLY_MAP_SIZE)
+        ; link_end <- CONFIG_LINUX_MAP_SIZE , see processor.h
+        MOVI r5, (PAGE_OFFSET + CONFIG_LINUX_MAP_SIZE)
 
         ; pgd <- __pa(tbl_pg_dir)
         MOVA r6, \tbl_pg_dir
@@ -230,7 +250,7 @@
         SUBR r8, r8, r3
         ADDR r8, r8, r4
 
-        BUILD_PAGE_TABLE r3, r4, r5, r6, r7, r8, r9, r10
+        BUILD_PAGE_TABLE r3, r4, r5, r6, r7, r8, r9, r10, r11, r12
 .endm
 
 ; Enable MMU and jump into virtual address space.
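The reworked BUILD_PAGE_TABLE loop is easier to read in C. The sketch below is an editor's rendering of the intended control flow only, not a literal translation: \val tracks the next NEXT_SIZE boundary in link-address space, \foo walks the 8-byte slots of the parent-level table, and crossing a boundary re-enters the walk one level up (ENTRY_LABEL_NEXT) with the next PMD table, which is why init.c below lays the PMD tables out back to back. build_*_entry() are hypothetical stand-ins for the BUILD_*_ENTRY macros.

/*
 * Editor's C sketch of the loop in BUILD_PAGE_TABLE (intent only).
 * ENTRY_SIZE/NEXT_SIZE follow the per-MMU #defines above; build_*_entry()
 * stand in for the BUILD_*_ENTRY macros.
 */
static void build_page_table(unsigned long link_addr, unsigned long phy_addr,
                             unsigned long link_end, u64 *pgd, u64 *pud, u64 *pmd)
{
        unsigned long boundary = link_addr + NEXT_SIZE; /* ADDR \val, \link_addr, NEXT_SIZE */
        u64 *slot = pmd;                                /* MOVR \foo, \pmd (8-byte entries) */

        build_pgd_entry(pgd, link_addr, pud);           /* .Lbuild_pgd_entry */
        build_pud_entry(pud, link_addr, pmd);           /* .Lbuild_pud_entry */

        for (;;) {
                build_pmd_entry(pmd, link_addr, phy_addr);      /* .Lbuild_pmd_entry */
                phy_addr += ENTRY_SIZE;
                link_addr += ENTRY_SIZE;
                if (link_addr >= link_end)
                        break;                          /* .Lbuild_end */
                slot++;                                 /* ADDR \foo, \foo, 0x8 */
                if (link_addr >= boundary) {            /* .Linc: crossed a NEXT_SIZE boundary */
                        pmd = slot;                     /* continue in the next, contiguous PMD table */
                        boundary += NEXT_SIZE;
                        build_pud_entry(pud, link_addr, pmd);   /* b ENTRY_LABEL_NEXT */
                }
        }
}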

arch/arc/mm/init.c

Lines changed: 2 additions & 2 deletions
@@ -19,13 +19,13 @@
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pud_t swapper_pud[PTRS_PER_PUD] __page_aligned_bss;
-pmd_t swapper_pmd[PTRS_PER_PMD] __page_aligned_bss;
+pmd_t swapper_pmd[2*PTRS_PER_PMD] __page_aligned_bss; // BUG_ON! swapper_pmd[PTRS_PER_PMD] __page_aligned_bss
 
 #if defined(CONFIG_ISA_ARCV3)
 /* Used for early memory map in head.S for ARCv3 */
 pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
 pud_t early_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
-pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
+pmd_t early_pmd[2*PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); // BUG_ON! early_pmd[PTRS_PER_PMD] __aligned(PAGE_SIZE)
 #endif
 
 char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
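The doubled arrays give the early and swapper page tables a second PMD table, enough for a 2 GB map: one PMD table spans PTRS_PER_PMD * PMD_SIZE of virtual space (1 GB with 4K pages). The trailing comments record the original single-table declarations. A hedged sketch of a build-time check that would tie the hard-coded factor of 2 to the new Kconfig option (KERNEL_MAP_PMD_TABLES is a hypothetical name, not in the patch; DIV_ROUND_UP/static_assert are the kernel's existing helpers):

/* Editor's sketch, not from the patch. */
#define KERNEL_MAP_PMD_TABLES \
        DIV_ROUND_UP(CONFIG_LINUX_MAP_SIZE, PTRS_PER_PMD * PMD_SIZE)

static_assert(KERNEL_MAP_PMD_TABLES <= 2,
              "swapper_pmd/early_pmd only provide two PMD tables");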

arch/arc/mm/tlb-arcv3.c

Lines changed: 25 additions & 18 deletions
@@ -160,10 +160,6 @@ void __init early_fixmap_init(void)
         BUILD_BUG_ON(pmd_index(FIXADDR_START) != \
                      pmd_index(FIXADDR_START + FIXADDR_SIZE));
 
-        /* FIXADDR space must not overlap early mapping. */
-        BUILD_BUG_ON(FIXADDR_START >= PAGE_OFFSET && \
-                     FIXADDR_START < PAGE_OFFSET + EARLY_MAP_SIZE);
-
         addr = FIXADDR_START;
 
         pgd = (pgd_t *) __va(arc_mmu_rtp_get_addr(1));
@@ -240,26 +236,26 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 int arc_map_kernel_in_mm(struct mm_struct *mm)
 {
         unsigned long addr = PAGE_OFFSET;
-        unsigned long end = PAGE_OFFSET + PUD_SIZE;
+        unsigned long end = PAGE_OFFSET + CONFIG_LINUX_MAP_SIZE;
         pgd_t *pgd;
         p4d_t *p4d;
         pud_t *pud;
         pmd_t *pmd;
 
-        pgd = pgd_offset(mm, addr);
-        if (pgd_none(*pgd) || !pgd_present(*pgd))
-                return 1;
+        do {
+                pgprot_t prot = PAGE_KERNEL_BLK;
 
-        p4d = p4d_offset(pgd, addr);
-        if (p4d_none(*p4d) || !p4d_present(*p4d))
-                return 1;
+                pgd = pgd_offset(mm, addr);
+                if (pgd_none(*pgd) || !pgd_present(*pgd))
+                        return 1;
 
-        pud = pud_offset(p4d, addr);
-        if (pud_none(*pud) || !pud_present(*pud))
-                return 1;
+                p4d = p4d_offset(pgd, addr);
+                if (p4d_none(*p4d) || !p4d_present(*p4d))
+                        return 1;
 
-        do {
-                pgprot_t prot = PAGE_KERNEL_BLK;
+                pud = pud_offset(p4d, addr);
+                if (pud_none(*pud) || !pud_present(*pud))
+                        return 1;
 
                 pmd = pmd_offset(pud, addr);
                 if (!pmd_none(*pmd) || pmd_present(*pmd))
@@ -285,12 +281,25 @@ void arc_paging_init(void)
         idx = pud_index(PAGE_OFFSET);
         swapper_pud[idx] = pfn_pud(virt_to_pfn(swapper_pmd), PAGE_TABLE);
         ptw_flush(&swapper_pud[idx]);
+
+        if(CONFIG_LINUX_MAP_SIZE > 0x40000000) { //Mapping from 1Gb..2Gb
+                idx = pud_index(PAGE_OFFSET + 0x40000000);
+                swapper_pud[idx] = pfn_pud(virt_to_pfn(&swapper_pmd[PTRS_PER_PMD]), PAGE_TABLE);
+                ptw_flush(&swapper_pud[idx]);
+        }
+
 #elif CONFIG_PGTABLE_LEVELS == 3
         unsigned int idx;
 
         idx = pgd_index(PAGE_OFFSET);
         swapper_pg_dir[idx] = pfn_pgd(virt_to_pfn(swapper_pmd), PAGE_TABLE);
         ptw_flush(&swapper_pg_dir[idx]);
+
+        if(CONFIG_LINUX_MAP_SIZE > 0x40000000) { //Mapping from 1Gb..2Gb
+                idx = pgd_index(PAGE_OFFSET + 0x40000000);
+                swapper_pg_dir[idx] = pfn_pgd(virt_to_pfn(&swapper_pmd[PTRS_PER_PMD]), PAGE_TABLE);
+                ptw_flush(&swapper_pg_dir[idx]);
+        }
 #endif
 
         arc_map_kernel_in_mm(&init_mm);
@@ -309,8 +318,6 @@ void arc_mmu_init(void)
          */
         /* It is always true when PAGE_OFFSET is aligned to pmd. */
         BUILD_BUG_ON(pmd_index(PAGE_OFFSET) != 0);
-        /* And size of early mapping is lower then PUD. */
-        BUILD_BUG_ON(EARLY_MAP_SIZE > PUD_SIZE);
 
         if (mmuinfo.pg_sz_k != TO_KB(PAGE_SIZE))
                 panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

0 commit comments
