@@ -941,6 +941,8 @@ static void perf_ibs_get_mem_lock(union ibs_op_data3 *op_data3,
941941 data_src -> mem_lock = PERF_MEM_LOCK_LOCKED ;
942942}
943943
/*
 * Be careful. Works only for contiguous MSRs: the index is the MSR's
 * distance from the first register of its (fetch/op) register block,
 * matching the order the handler reads them into ibs_data->regs[].
 */
#define ibs_fetch_msr_idx(msr)	(msr - MSR_AMD64_IBSFETCHCTL)
#define ibs_op_msr_idx(msr)	(msr - MSR_AMD64_IBSOPCTL)
945947
946948static void perf_ibs_get_data_src (struct perf_ibs_data * ibs_data ,
@@ -1036,6 +1038,67 @@ static int perf_ibs_get_offset_max(struct perf_ibs *perf_ibs, u64 sample_type,
10361038 return 1 ;
10371039}
10381040
1041+ static bool perf_ibs_is_kernel_data_addr (struct perf_event * event ,
1042+ struct perf_ibs_data * ibs_data )
1043+ {
1044+ u64 sample_type_mask = PERF_SAMPLE_ADDR | PERF_SAMPLE_RAW ;
1045+ union ibs_op_data3 op_data3 ;
1046+ u64 dc_lin_addr ;
1047+
1048+ op_data3 .val = ibs_data -> regs [ibs_op_msr_idx (MSR_AMD64_IBSOPDATA3 )];
1049+ dc_lin_addr = ibs_data -> regs [ibs_op_msr_idx (MSR_AMD64_IBSDCLINAD )];
1050+
1051+ return unlikely ((event -> attr .sample_type & sample_type_mask ) &&
1052+ op_data3 .dc_lin_addr_valid && kernel_ip (dc_lin_addr ));
1053+ }
1054+
1055+ static bool perf_ibs_is_kernel_br_target (struct perf_event * event ,
1056+ struct perf_ibs_data * ibs_data ,
1057+ int br_target_idx )
1058+ {
1059+ union ibs_op_data op_data ;
1060+ u64 br_target ;
1061+
1062+ op_data .val = ibs_data -> regs [ibs_op_msr_idx (MSR_AMD64_IBSOPDATA )];
1063+ br_target = ibs_data -> regs [br_target_idx ];
1064+
1065+ return unlikely ((event -> attr .sample_type & PERF_SAMPLE_RAW ) &&
1066+ op_data .op_brn_ret && kernel_ip (br_target ));
1067+ }
1068+
1069+ static bool perf_ibs_swfilt_discard (struct perf_ibs * perf_ibs , struct perf_event * event ,
1070+ struct pt_regs * regs , struct perf_ibs_data * ibs_data ,
1071+ int br_target_idx )
1072+ {
1073+ if (perf_exclude_event (event , regs ))
1074+ return true;
1075+
1076+ if (perf_ibs != & perf_ibs_op || !event -> attr .exclude_kernel )
1077+ return false;
1078+
1079+ if (perf_ibs_is_kernel_data_addr (event , ibs_data ))
1080+ return true;
1081+
1082+ if (br_target_idx != -1 &&
1083+ perf_ibs_is_kernel_br_target (event , ibs_data , br_target_idx ))
1084+ return true;
1085+
1086+ return false;
1087+ }
1088+
/*
 * Scrub physical addresses from the captured register set: clear the
 * phys-addr-valid bit in the corresponding control/data register and zero
 * the physical-address register itself, for both op and fetch flavors.
 * NOTE(review): bit 18 of IBSOPDATA3 and bit 52 of IBSFETCHCTL are
 * presumably the DC/fetch PhysAddrValid bits per the AMD APM — confirm.
 */
static void perf_ibs_phyaddr_clear(struct perf_ibs *perf_ibs,
				   struct perf_ibs_data *ibs_data)
{
	if (perf_ibs == &perf_ibs_op) {
		/* Clear valid bit, then the address register itself. */
		ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA3)] &= ~(1ULL << 18);
		ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCPHYSAD)] = 0;
		return;
	}

	/* Fetch PMU: same scrub on the fetch-side registers. */
	ibs_data->regs[ibs_fetch_msr_idx(MSR_AMD64_IBSFETCHCTL)] &= ~(1ULL << 52);
	ibs_data->regs[ibs_fetch_msr_idx(MSR_AMD64_IBSFETCHPHYSAD)] = 0;
}
1101+
10391102static int perf_ibs_handle_irq (struct perf_ibs * perf_ibs , struct pt_regs * iregs )
10401103{
10411104 struct cpu_perf_ibs * pcpu = this_cpu_ptr (perf_ibs -> pcpu );
@@ -1048,6 +1111,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
10481111 int offset , size , check_rip , offset_max , throttle = 0 ;
10491112 unsigned int msr ;
10501113 u64 * buf , * config , period , new_config = 0 ;
1114+ int br_target_idx = -1 ;
10511115
10521116 if (!test_bit (IBS_STARTED , pcpu -> state )) {
10531117fail :
@@ -1102,6 +1166,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
11021166 if (perf_ibs == & perf_ibs_op ) {
11031167 if (ibs_caps & IBS_CAPS_BRNTRGT ) {
11041168 rdmsrl (MSR_AMD64_IBSBRTARGET , * buf ++ );
1169+ br_target_idx = size ;
11051170 size ++ ;
11061171 }
11071172 if (ibs_caps & IBS_CAPS_OPDATA4 ) {
@@ -1128,16 +1193,20 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
11281193 regs .flags |= PERF_EFLAGS_EXACT ;
11291194 }
11301195
1131- if (perf_ibs == & perf_ibs_op )
1132- perf_ibs_parse_ld_st_data (event -> attr .sample_type , & ibs_data , & data );
1133-
11341196 if ((event -> attr .config2 & IBS_SW_FILTER_MASK ) &&
1135- (perf_exclude_event (event , & regs ) ||
1136- ((data .sample_flags & PERF_SAMPLE_ADDR ) &&
1137- event -> attr .exclude_kernel && kernel_ip (data .addr )))) {
1197+ perf_ibs_swfilt_discard (perf_ibs , event , & regs , & ibs_data , br_target_idx )) {
11381198 throttle = perf_event_account_interrupt (event );
11391199 goto out ;
11401200 }
1201+ /*
1202+ * Prevent leaking physical addresses to unprivileged users. Skip
1203+ * PERF_SAMPLE_PHYS_ADDR check since generic code prevents it for
1204+ * unprivileged users.
1205+ */
1206+ if ((event -> attr .sample_type & PERF_SAMPLE_RAW ) &&
1207+ perf_allow_kernel (& event -> attr )) {
1208+ perf_ibs_phyaddr_clear (perf_ibs , & ibs_data );
1209+ }
11411210
11421211 if (event -> attr .sample_type & PERF_SAMPLE_RAW ) {
11431212 raw = (struct perf_raw_record ){
@@ -1149,6 +1218,9 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
11491218 perf_sample_save_raw_data (& data , event , & raw );
11501219 }
11511220
1221+ if (perf_ibs == & perf_ibs_op )
1222+ perf_ibs_parse_ld_st_data (event -> attr .sample_type , & ibs_data , & data );
1223+
11521224 /*
11531225 * rip recorded by IbsOpRip will not be consistent with rsp and rbp
11541226 * recorded as part of interrupt regs. Thus we need to use rip from
0 commit comments