@@ -82,6 +82,7 @@ config RISCV
 	select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
 	select ARCH_WANTS_NO_INSTR
 	select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS
 	select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
 	select BUILDTIME_TABLE_SORT if MMU
 	select CLINT_TIMER if RISCV_M_MODE
@@ -115,6 +116,7 @@ config RISCV
 	select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO
 	select HARDIRQS_SW_RESEND
 	select HAS_IOPORT if MMU
+	select HAVE_ALIGNED_STRUCT_PAGE
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT
@@ -506,6 +508,39 @@ config NODES_SHIFT
 	  Specify the maximum number of NUMA Nodes available on the target
 	  system. Increases memory reserved to accommodate various tables.
 
+choice
+	prompt "RISC-V spinlock type"
+	default RISCV_COMBO_SPINLOCKS
+
+config RISCV_TICKET_SPINLOCKS
+	bool "Using ticket spinlock"
+
+config RISCV_QUEUED_SPINLOCKS
+	bool "Using queued spinlock"
+	depends on SMP && MMU && NONPORTABLE
+	select ARCH_USE_QUEUED_SPINLOCKS
+	help
+	  The queued spinlock implementation requires the forward progress
+	  guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or
+	  LR/SC with Ziccrse provides such a guarantee.
+
+	  Select this if and only if Zabha or Ziccrse is available on your
+	  platform; RISCV_QUEUED_SPINLOCKS must not be selected for platforms
+	  without one of those extensions.
+
+	  If unsure, select RISCV_COMBO_SPINLOCKS, which will use qspinlocks
+	  when supported and otherwise ticket spinlocks.
+
+config RISCV_COMBO_SPINLOCKS
+	bool "Using combo spinlock"
+	depends on SMP && MMU
+	select ARCH_USE_QUEUED_SPINLOCKS
+	help
+	  Embed both queued spinlock and ticket lock so that the spinlock
+	  implementation can be chosen at runtime.
+
+endchoice
+
 config RISCV_ALTERNATIVE
 	bool
 	depends on !XIP_KERNEL
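The RISCV_COMBO_SPINLOCKS help text above describes compiling in both lock implementations and picking one at runtime. As a rough illustration of that idea only, here is a minimal userspace C sketch that selects between a ticket lock and a CAS-based lock (standing in for the queued spinlock) from a single flag set at startup. The kernel's real implementation lives in arch/riscv and uses a static branch plus the generic qspinlock; every name below (combo_lock, use_queued, ...) is hypothetical.

```c
/*
 * Hypothetical userspace analogue of the "combo" idea only: both lock
 * implementations are compiled in and one is chosen once, at startup,
 * from a detected capability.  None of these names come from the kernel.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct combo_lock {
	atomic_uint next;   /* ticket path: next ticket to hand out */
	atomic_uint owner;  /* ticket path: ticket currently being served */
	atomic_uint locked; /* CAS path: stand-in for the queued lock */
};

static bool use_queued; /* would be set from a boot-time hardware probe */

static void ticket_lock(struct combo_lock *l)
{
	unsigned int t = atomic_fetch_add_explicit(&l->next, 1, memory_order_relaxed);

	while (atomic_load_explicit(&l->owner, memory_order_acquire) != t)
		; /* spin until our ticket is served */
}

static void ticket_unlock(struct combo_lock *l)
{
	atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}

/* Simple CAS lock standing in for the queued implementation. */
static void cas_lock(struct combo_lock *l)
{
	unsigned int expected = 0;

	while (!atomic_compare_exchange_weak_explicit(&l->locked, &expected, 1,
						      memory_order_acquire,
						      memory_order_relaxed))
		expected = 0;
}

static void cas_unlock(struct combo_lock *l)
{
	atomic_store_explicit(&l->locked, 0, memory_order_release);
}

static void combo_acquire(struct combo_lock *l)
{
	if (use_queued)
		cas_lock(l);
	else
		ticket_lock(l);
}

static void combo_release(struct combo_lock *l)
{
	if (use_queued)
		cas_unlock(l);
	else
		ticket_unlock(l);
}

int main(void)
{
	struct combo_lock l = { 0 };

	use_queued = false; /* pretend the probe found no Zabha/Ziccrse */
	combo_acquire(&l);
	puts("lock held");
	combo_release(&l);
	return 0;
}
```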
@@ -531,6 +566,17 @@ config RISCV_ISA_C
 
 	  If you don't know what to do here, say Y.
 
+config RISCV_ISA_SUPM
+	bool "Supm extension for userspace pointer masking"
+	depends on 64BIT
+	default y
+	help
+	  Add support for pointer masking in userspace (Supm) when the
+	  underlying hardware extension (Smnpm or Ssnpm) is detected at boot.
+
+	  If this option is disabled, userspace will be unable to use
+	  the prctl(PR_{SET,GET}_TAGGED_ADDR_CTRL) API.
+
 config RISCV_ISA_SVNAPOT
 	bool "Svnapot extension support for supervisor mode NAPOT pages"
 	depends on 64BIT && MMU
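For the RISCV_ISA_SUPM entry above, the prctl() interface it references can be exercised from userspace roughly as follows. This is a hedged sketch: PR_{SET,GET}_TAGGED_ADDR_CTRL and PR_TAGGED_ADDR_ENABLE are existing constants from <linux/prctl.h>, but the RISC-V-specific field that selects how many upper address bits are masked is not shown, and the exact arguments accepted depend on the kernel version.

```c
/* Minimal query/enable of tagged-address control; error paths show
 * what to expect on kernels without pointer-masking support. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>

int main(void)
{
	long ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);

	if (ctrl < 0) {
		/* EINVAL typically means the kernel offers no pointer masking. */
		fprintf(stderr, "PR_GET_TAGGED_ADDR_CTRL: %s\n", strerror(errno));
		return 1;
	}
	printf("current tagged-address control: %#lx\n", (unsigned long)ctrl);

	/* Ask for tagged pointers to be accepted by the syscall ABI. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) < 0) {
		fprintf(stderr, "PR_SET_TAGGED_ADDR_CTRL: %s\n", strerror(errno));
		return 1;
	}
	puts("tagged addresses enabled");
	return 0;
}
```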
@@ -632,6 +678,40 @@ config RISCV_ISA_ZAWRS
 	  use of these instructions in the kernel when the Zawrs extension is
 	  detected at boot.
 
+config TOOLCHAIN_HAS_ZABHA
+	bool
+	default y
+	depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zabha)
+	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zabha)
+	depends on AS_HAS_OPTION_ARCH
+
+config RISCV_ISA_ZABHA
+	bool "Zabha extension support for atomic byte/halfword operations"
+	depends on TOOLCHAIN_HAS_ZABHA
+	depends on RISCV_ALTERNATIVE
+	default y
+	help
+	  Enable the use of the Zabha ISA-extension to implement kernel
+	  byte/halfword atomic memory operations when it is detected at boot.
+
+	  If you don't know what to do here, say Y.
+
+config TOOLCHAIN_HAS_ZACAS
+	bool
+	default y
+	depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zacas)
+	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zacas)
+	depends on AS_HAS_OPTION_ARCH
+
+config RISCV_ISA_ZACAS
+	bool "Zacas extension support for atomic CAS"
+	depends on TOOLCHAIN_HAS_ZACAS
+	depends on RISCV_ALTERNATIVE
+	default y
+	help
+	  Enable the use of the Zacas ISA-extension to implement kernel atomic
+	  cmpxchg operations when it is detected at boot.
+
 	  If you don't know what to do here, say Y.
 
 config TOOLCHAIN_HAS_ZBB
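To make the TOOLCHAIN_HAS_ZACAS/RISCV_ISA_ZACAS entries above concrete, the sketch below shows a word-sized compare-and-swap built on the Zacas amocas.w instruction. It is not the kernel's cmpxchg() implementation (which patches between LR/SC and Zacas via alternatives); it only illustrates the instruction these options let the kernel use, and it assumes an assembler that accepts ".option arch" (the AS_HAS_OPTION_ARCH dependency) plus hardware that implements Zacas.

```c
/* Sketch: CAS on a 32-bit word using Zacas.  amocas.w reads the expected
 * value from rd, and on return rd holds the value previously in memory;
 * memory is updated to rs2 only if the comparison succeeded. */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t cas32_zacas(uint32_t *addr, uint32_t expected,
				   uint32_t desired)
{
	uint32_t old = expected;

	__asm__ __volatile__(
		".option push\n"
		".option arch, +zacas\n"
		"amocas.w.aqrl %0, %2, (%1)\n"
		".option pop\n"
		: "+&r" (old)
		: "r" (addr), "r" (desired)
		: "memory");
	return old;
}

int main(void)
{
	uint32_t v = 1;
	uint32_t prev = cas32_zacas(&v, 1, 2); /* matches, so v becomes 2 */

	printf("prev=%u now=%u\n", prev, v);
	return 0;
}
```

Because the extension is enabled only inside the asm block via ".option arch", the file can be built with a plain -march as long as the assembler knows Zacas, which mirrors what the toolchain test above probes for.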
@@ -785,10 +865,24 @@ config THREAD_SIZE_ORDER
 
 config RISCV_MISALIGNED
 	bool
+	help
+	  Embed support for detecting and emulating misaligned
+	  scalar or vector loads and stores.
+
+config RISCV_SCALAR_MISALIGNED
+	bool
+	select RISCV_MISALIGNED
 	select SYSCTL_ARCH_UNALIGN_ALLOW
 	help
 	  Embed support for emulating misaligned loads and stores.
 
+config RISCV_VECTOR_MISALIGNED
+	bool
+	select RISCV_MISALIGNED
+	depends on RISCV_ISA_V
+	help
+	  Enable detection of support for vector misaligned loads and stores.
+
 choice
 	prompt "Unaligned Accesses Support"
 	default RISCV_PROBE_UNALIGNED_ACCESS
@@ -800,7 +894,7 @@ choice
 
 config RISCV_PROBE_UNALIGNED_ACCESS
 	bool "Probe for hardware unaligned access support"
-	select RISCV_MISALIGNED
+	select RISCV_SCALAR_MISALIGNED
 	help
 	  During boot, the kernel will run a series of tests to determine the
 	  speed of unaligned accesses. This probing will dynamically determine
@@ -811,7 +905,7 @@ config RISCV_PROBE_UNALIGNED_ACCESS
 
 config RISCV_EMULATED_UNALIGNED_ACCESS
 	bool "Emulate unaligned access where system support is missing"
-	select RISCV_MISALIGNED
+	select RISCV_SCALAR_MISALIGNED
 	help
 	  If unaligned memory accesses trap into the kernel as they are not
 	  supported by the system, the kernel will emulate the unaligned
@@ -840,6 +934,46 @@ config RISCV_EFFICIENT_UNALIGNED_ACCESS
 
 endchoice
 
+choice
+	prompt "Vector Unaligned Accesses Support"
+	depends on RISCV_ISA_V
+	default RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
+	help
+	  This determines the level of support for vector unaligned accesses. This
+	  information is used by the kernel to perform optimizations. It is also
+	  exposed to user space via the hwprobe syscall. The hardware will be
+	  probed at boot by default.
+
+config RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
+	bool "Probe speed of vector unaligned accesses"
+	select RISCV_VECTOR_MISALIGNED
+	depends on RISCV_ISA_V
+	help
+	  During boot, the kernel will run a series of tests to determine the
+	  speed of vector unaligned accesses, if they are supported. This probing
+	  will dynamically determine the speed of vector unaligned accesses on
+	  the underlying system.
+
+config RISCV_SLOW_VECTOR_UNALIGNED_ACCESS
+	bool "Assume the system supports slow vector unaligned memory accesses"
+	depends on NONPORTABLE
+	help
+	  Assume that the system supports slow vector unaligned memory accesses. The
+	  kernel and userspace programs may not be able to run at all on systems
+	  that do not support unaligned memory accesses.
+
+config RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
+	bool "Assume the system supports fast vector unaligned memory accesses"
+	depends on NONPORTABLE
+	help
+	  Assume that the system supports fast vector unaligned memory accesses. When
+	  enabled, this option improves the performance of the kernel on such
+	  systems. However, the kernel and userspace programs will run much more
+	  slowly, or will not be able to run at all, on systems that do not
+	  support efficient unaligned memory accesses.
+
+endchoice
+
 source "arch/riscv/Kconfig.vendor"
 
 endmenu # "Platform type"
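The vector unaligned access choice above notes that the probed result is exposed to userspace via the hwprobe syscall. A minimal query might look like the sketch below; it assumes kernel headers new enough to define __NR_riscv_hwprobe and the RISCV_HWPROBE_KEY_MISALIGNED_{SCALAR,VECTOR}_PERF keys (the vector key arrives together with this series, so older headers will not have it).

```c
/* Query the kernel's view of scalar and vector misaligned-access
 * performance for all online CPUs via riscv_hwprobe. */
#include <asm/hwprobe.h>    /* struct riscv_hwprobe, RISCV_HWPROBE_KEY_* */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct riscv_hwprobe pairs[] = {
		{ .key = RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF },
		{ .key = RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF },
	};
	size_t n = sizeof(pairs) / sizeof(pairs[0]);

	/* cpusetsize = 0 and cpus = NULL means "all online CPUs". */
	if (syscall(__NR_riscv_hwprobe, pairs, n, 0, NULL, 0) != 0) {
		perror("riscv_hwprobe");
		return 1;
	}

	/* Keys the kernel does not recognize come back as key == -1, value == 0. */
	for (size_t i = 0; i < n; i++)
		printf("key %lld -> value %llu\n",
		       (long long)pairs[i].key,
		       (unsigned long long)pairs[i].value);
	return 0;
}
```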