@@ -16,6 +16,7 @@
 #include <asm/entry-common.h>
 #include <asm/hwprobe.h>
 #include <asm/cpufeature.h>
+#include <asm/sbi.h>
 #include <asm/vector.h>
 
 #define INSN_MATCH_LB 0x3
@@ -646,7 +647,7 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
 
 static bool unaligned_ctl __read_mostly;
 
-void check_unaligned_access_emulated(struct work_struct *work __always_unused)
+static void check_unaligned_access_emulated(struct work_struct *work __always_unused)
 {
 	int cpu = smp_processor_id();
 	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
@@ -657,6 +658,13 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
 	__asm__ __volatile__ (
 		"	" REG_L " %[tmp], 1(%[ptr])\n"
 		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
+}
+
+static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
+{
+	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
+
+	check_unaligned_access_emulated(NULL);
 
 	/*
 	 * If unaligned_ctl is already set, this means that we detected that all
@@ -665,9 +673,10 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
 	 */
 	if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) {
 		pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
-		while (true)
-			cpu_relax();
+		return -EINVAL;
 	}
+
+	return 0;
 }
 
 bool __init check_unaligned_access_emulated_all_cpus(void)
@@ -699,4 +708,60 @@ bool __init check_unaligned_access_emulated_all_cpus(void)
 {
 	return false;
 }
+static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_RISCV_SBI
+
+static bool misaligned_traps_delegated;
+
+static int cpu_online_sbi_unaligned_setup(unsigned int cpu)
+{
+	if (sbi_fwft_set(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0) &&
+	    misaligned_traps_delegated) {
+		pr_crit("Misaligned trap delegation non homogeneous (expected delegated)\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void __init unaligned_access_init(void)
+{
+	int ret;
+
+	ret = sbi_fwft_set_online_cpus(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0);
+	if (ret)
+		return;
+
+	misaligned_traps_delegated = true;
+	pr_info("SBI misaligned access exception delegation ok\n");
+	/*
+	 * Note that we don't have to take any specific action here: if
+	 * the delegation is successful, check_unaligned_access_emulated()
+	 * will verify that the platform does indeed trap on misaligned
+	 * accesses.
+	 */
+}
+#else
+void __init unaligned_access_init(void) {}
+
+static int cpu_online_sbi_unaligned_setup(unsigned int cpu __always_unused)
+{
+	return 0;
+}
 #endif
+
+int cpu_online_unaligned_access_init(unsigned int cpu)
+{
+	int ret;
+
+	ret = cpu_online_sbi_unaligned_setup(cpu);
+	if (ret)
+		return ret;
+
+	return cpu_online_check_unaligned_access_emulated(cpu);
+}
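
For reference, a minimal sketch of how the new cpu_online_unaligned_access_init() hook could be wired into the CPU hotplug path. The riscv_online_cpu() callback name, the "riscv/unaligned:online" state string, and the use of a dynamic CPUHP slot are illustrative assumptions, not part of this patch:

/*
 * Hypothetical wiring sketch (assumed names, not from this patch):
 * run the combined per-CPU setup whenever a CPU comes online, so a
 * hotplugged CPU re-applies the SBI misaligned-trap delegation and
 * re-runs the emulation check.
 */
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>

int cpu_online_unaligned_access_init(unsigned int cpu);

static int riscv_online_cpu(unsigned int cpu)
{
	/* Fails with -EINVAL if this CPU's behaviour is not homogeneous */
	return cpu_online_unaligned_access_init(cpu);
}

static int __init unaligned_hotplug_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state slot;
	 * cpuhp_setup_state() returns the slot number (> 0) on success. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/unaligned:online",
				riscv_online_cpu, NULL);
	return ret < 0 ? ret : 0;
}
arch_initcall(unaligned_hotplug_init);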