#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/arcregs.h>
#include <asm/fixmap.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
1214/* A copy of the ASID from the PID reg is kept in asid_cache */
1315DEFINE_PER_CPU (unsigned int , asid_cache ) = MM_CTXT_FIRST_CYCLE ;
@@ -230,89 +232,147 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
230232 set_pte (pte , pfn_pte (PFN_DOWN (phys ), prot ));
231233}
232234
233- /*
234- * Map the kernel code/data into page tables for a given @mm
235- *
236- * Assumes
237- * - pgd, pud and pmd are already allocated
238- * - pud is wired up to pgd and pmd to pud
239- *
240- * TODO: assumes 4 levels, implement properly using p*d_addr_end loops
241- */
242- int arc_map_kernel_in_mm (struct mm_struct * mm )
235+ static phys_addr_t __init arc_map_early_alloc_page (void )
243236{
244- unsigned long addr = PAGE_OFFSET ;
245- unsigned long end = PAGE_OFFSET + CONFIG_LINUX_MAP_SIZE ;
237+ phys_addr_t phys ;
238+
239+ /* At early stage we have mapped PAGE_OFFSET + CONFIG_LINUX_MAP_SIZE */
240+ phys = memblock_phys_alloc_range (PAGE_SIZE , PAGE_SIZE , 0 ,
241+ __pa (PAGE_OFFSET + CONFIG_LINUX_MAP_SIZE ));
242+
243+ memset (__va (phys ), 0 , PAGE_SIZE );
244+
245+ return phys ;
246+ }
247+
248+ static int __init arc_map_segment_in_mm (struct mm_struct * mm ,
249+ unsigned long start ,
250+ unsigned long end ,
251+ pgprot_t prot )
252+ {
253+ unsigned long addr ;
254+ phys_addr_t phys ;
246255 pgd_t * pgd ;
247256 p4d_t * p4d ;
248257 pud_t * pud ;
249258 pmd_t * pmd ;
259+ pte_t * pte ;
250260
251- do {
252- pgprot_t prot = PAGE_KERNEL_BLK ;
261+ BUG_ON ( start & ( PAGE_SIZE - 1 ));
262+ BUG_ON ( end & ( PAGE_SIZE - 1 )) ;
253263
264+ /* TODO: Use bigger blocks if possible */
265+ addr = start ;
266+ do {
254267 pgd = pgd_offset (mm , addr );
255- if (pgd_none (* pgd ) || ! pgd_present ( * pgd ))
268+ if (pgd_none (* pgd )){
256269 return 1 ;
270+ }
257271
258272 p4d = p4d_offset (pgd , addr );
259- if (p4d_none (* p4d ) || !p4d_present (* p4d ))
260- return 1 ;
273+ if (p4d_none (* p4d )) {
274+ phys = arc_map_early_alloc_page ();
275+ p4d_populate (mm , p4d , __va (phys ));
276+ }
261277
262278 pud = pud_offset (p4d , addr );
263- if (pud_none (* pud ) || !pud_present (* pud ))
264- return 1 ;
279+ if (pud_none (* pud )) {
280+ phys = arc_map_early_alloc_page ();
281+ pud_populate (mm , pud , __va (phys ));
282+ }
265283
266284 pmd = pmd_offset (pud , addr );
267- if (!pmd_none (* pmd ) || pmd_present (* pmd ))
268- return 1 ;
285+ if (pmd_none (* pmd )) {
286+ phys = arc_map_early_alloc_page ();
287+ pmd_populate_kernel (mm , pmd , __va (phys ));
288+ }
269289
270- set_pmd (pmd , pfn_pmd (virt_to_pfn (addr ), prot ));
271- addr = pmd_addr_end (addr , end );
272- }
273- while (addr != end );
290+ pte = pte_offset_kernel (pmd , addr );
291+
292+ set_pte (pte , pfn_pte (virt_to_pfn (addr ), prot ));
293+
294+ addr += PAGE_SIZE ;
295+ } while (addr < end );
274296
275297 return 0 ;
276298}
277299
278- void arc_paging_init (void )
300+ /*
301+ * Map the kernel code/data into page tables for a given @mm
302+ */
303+ int __init arc_map_kernel_in_mm (struct mm_struct * mm )
279304{
280- #if CONFIG_PGTABLE_LEVELS == 4
281- unsigned int idx ;
305+ extern char __init_text_begin [];
306+ extern char __init_data_begin [];
307+ extern char __init_text_end [];
308+ extern char __init_data_end [];
309+
310+ arc_map_segment_in_mm (mm ,
311+ PAGE_OFFSET ,
312+ (unsigned long ) __init_data_begin ,
313+ PAGE_KERNEL_RWX );
314+ arc_map_segment_in_mm (mm ,
315+ (unsigned long ) __init_data_begin ,
316+ (unsigned long ) __init_data_end ,
317+ PAGE_KERNEL_RW );
318+ arc_map_segment_in_mm (mm ,
319+ (unsigned long ) __init_text_begin ,
320+ (unsigned long ) __init_text_end ,
321+ PAGE_KERNEL_RWX );
322+ arc_map_segment_in_mm (mm ,
323+ (unsigned long ) _stext ,
324+ (unsigned long ) _etext ,
325+ PAGE_KERNEL_RWX );
326+ arc_map_segment_in_mm (mm ,
327+ (unsigned long ) _sdata ,
328+ (unsigned long ) _end ,
329+ PAGE_KERNEL_RW );
282330
283- idx = pgd_index (PAGE_OFFSET );
284- swapper_pg_dir [idx ] = pfn_pgd (virt_to_pfn (swapper_pud ), PAGE_TABLE );
285- ptw_flush (& swapper_pg_dir [idx ]);
331+ return 0 ;
332+ }
286333
287- idx = pud_index (PAGE_OFFSET );
288- swapper_pud [idx ] = pfn_pud (virt_to_pfn (swapper_pmd ), PAGE_TABLE );
289- ptw_flush (& swapper_pud [idx ]);
334+ int __init arc_map_memory_in_mm (struct mm_struct * mm )
335+ {
336+ u64 i ;
337+ phys_addr_t start , end ;
290338
291- idx = pud_index (PAGE_OFFSET + 0x40000000 );
292- swapper_pud [idx ] = pfn_pud (virt_to_pfn (& swapper_pmd [PTRS_PER_PMD ]), PAGE_TABLE );
293- ptw_flush (& swapper_pud [idx ]);
339+ /*
340+ * Kernel (__pa(PAGE_OFFSET) to __pa(_end) is already mapped by
341+ * arc_map_kernel_in_mm(), so map only >= __pa(_end).
342+ *
343+ * We expect that kernel is mapped to the start of physical memory,
344+ * so start >= __pa(PAGE_OFFSET).
345+ */
346+ for_each_mem_range (i , & start , & end ) {
347+ if (start >= end )
348+ break ;
294349
295- #elif CONFIG_PGTABLE_LEVELS == 3
296- unsigned int idx ;
350+ if ( end <= __pa ( _end ))
351+ continue ;
297352
298- idx = pgd_index (PAGE_OFFSET );
299- swapper_pg_dir [idx ] = pfn_pgd (virt_to_pfn (swapper_pmd ), PAGE_TABLE );
300- ptw_flush (& swapper_pg_dir [idx ]);
353+ if (start < __pa (_end ))
354+ start = __pa (_end );
301355
302- idx = pgd_index (PAGE_OFFSET + 0x40000000 );
303- swapper_pg_dir [idx ] = pfn_pgd (virt_to_pfn (& swapper_pmd [PTRS_PER_PMD ]), PAGE_TABLE );
304- ptw_flush (& swapper_pg_dir [idx ]);
356+ arc_map_segment_in_mm (mm ,
357+ (unsigned long )__va (start ),
358+ (unsigned long )__va (end ),
359+ PAGE_KERNEL_RW );
360+ }
305361
306- #endif
362+ return 0 ;
363+ }
307364
365+ void __init arc_paging_init (void )
366+ {
308367 arc_map_kernel_in_mm (& init_mm );
368+ arc_map_memory_in_mm (& init_mm );
309369
310370 arc_mmu_rtp_set (0 , 0 , 0 );
311371 arc_mmu_rtp_set (1 , __pa (swapper_pg_dir ), 0 );
312372 local_flush_tlb_all ();
313373}
314374
315- void arc_mmu_init (void )
375+ void __init arc_mmu_init (void )
316376{
317377 u64 memattr ;
318378
@@ -329,8 +389,6 @@ void arc_mmu_init(void)
329389 if ((unsigned long )_end - PAGE_OFFSET > PUD_SIZE )
330390 panic ("kernel doesn't fit in PUD (%lu Mb)\n" , TO_MB (PUD_SIZE ));
331391
332- write_aux_reg (ARC_REG_MMU_TTBC , MMU_TTBC );
333-
334392 memattr = MEMATTR_NORMAL << (MEMATTR_IDX_NORMAL * 8 );
335393 memattr |= MEMATTR_UNCACHED << (MEMATTR_IDX_UNCACHED * 8 );
336394 memattr |= MEMATTR_VOLATILE << (MEMATTR_IDX_VOLATILE * 8 );
0 commit comments