From 4d1827485acecac0016eaaec199e97697afdaa84 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Mon, 31 Jul 2023 14:02:11 +0800 Subject: [PATCH 001/299] tools/power/turbostat: Fix failure with new uncore sysfs On some platforms, turbostat fails during launch time like below, turbostat version 2023.03.17 - Len Brown ... cpu40: MSR_IA32_PACKAGE_THERM_STATUS: 0x884c0000 (24 C) cpu40: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x00000003 (100 C, 100 C) turbostat: snapshot_sysfs_counter(/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00/current_freq_khz): No data available This is because new uncore sysfs is used on these platforms as introduced by commit 9b8dea80e3cb ("platform/x86/intel-uncore-freq: Support for cluster level controls"). With the new uncore sysfs interface, /sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00/current_freq_khz is still available, but reading it fails. How to support the fabric cluster level uncore sysfs is not settled yet, as a short term fix, clear the BIC_UNCORE_MHZ bit when new sysfs I/F is detected. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 9a10512e3407..9de1ff6f82ce 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -4163,6 +4163,10 @@ static void intel_uncore_frequency_probe(void) if (access("/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00", R_OK)) return; + /* Cluster level sysfs not supported yet. */ + if (!access("/sys/devices/system/cpu/intel_uncore_frequency/uncore00", R_OK)) + return; + if (!access("/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00/current_freq_khz", R_OK)) BIC_PRESENT(BIC_UNCORE_MHZ); From 137f01b3529d292a68d22e9681e2f903c768f790 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sat, 25 Mar 2023 21:57:07 +0800 Subject: [PATCH 002/299] tools/power/turbostat: Fix a knl bug MSR_KNL_CORE_C6_RESIDENCY should be evaluated only if 1. this is KNL platform AND 2. need to get C6 residency or need to calculate C1 residency Fix the broken logic introduced by commit 1e9042b9c8d4 ("tools/power turbostat: Fix CPU%C1 display value"). Fixes: 1e9042b9c8d4 ("tools/power turbostat: Fix CPU%C1 display value") Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 9de1ff6f82ce..fb6c6ddbdb60 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -2180,7 +2180,7 @@ retry: if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) { if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) return -7; - } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) { + } else if (do_knl_cstates && soft_c1_residency_display(BIC_CPU_c6)) { if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) return -7; } From b61b7d8c4c22c4298a50ae5d0ee88facb85ce665 Mon Sep 17 00:00:00 2001 From: Chen Yu Date: Mon, 27 Mar 2023 11:17:44 +0800 Subject: [PATCH 003/299] tools/power/turbostat: Enable the C-state Pre-wake printing Currently the C-state Pre-wake will not be printed due to the probe has not been invoked. Invoke the probe function accordingly. 
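For illustration only (not turbostat code; names are made up), a minimal standalone sketch of why a probe that is never invoked leaves its feature unprinted: the probe sets a capability flag, and the dump path is gated on that flag, so skipping the call silently disables the output.

#include <stdio.h>
#include <stdbool.h>

/* illustrative capability flag, set only by the probe routine */
static bool has_prewake;

static void prewake_probe(unsigned int family, unsigned int model)
{
        /* pretend this family/model supports C-state pre-wake */
        has_prewake = true;
}

static void dump_prewake(void)
{
        if (!has_prewake)
                return;         /* nothing is printed if the probe never ran */
        printf("C-state Pre-wake: enabled\n");
}

int main(void)
{
        /* without the probe call below, dump_prewake() prints nothing */
        prewake_probe(6, 0x55);
        dump_prewake();
        return 0;
}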
Fixes: aeb01e6d71ff ("tools/power turbostat: Print the C-state Pre-wake settings") Signed-off-by: Chen Yu Reviewed-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index fb6c6ddbdb60..03d4f09b103a 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -5794,6 +5794,7 @@ void process_cpuid() rapl_probe(family, model); perf_limit_reasons_probe(family, model); automatic_cstate_conversion_probe(family, model); + prewake_cstate_probe(family, model); check_tcc_offset(model_orig); From b98a6d78768ec459d394db8bc086d071eb6556c8 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 24 Mar 2023 16:30:55 +0800 Subject: [PATCH 004/299] tools/power/turbostat: Enable TCC Offset on more models All Models that duplicate INTEL_FAM6_CANNONLAKE_L support TCC Offset. Enable this feature on all these models. Delete obsolete model_orig. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 03d4f09b103a..d7880870ef68 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -211,9 +211,6 @@ int *fd_instr_count_percpu; struct timeval interval_tv = { 5, 0 }; struct timespec interval_ts = { 5, 0 }; -/* Save original CPU model */ -unsigned int model_orig; - unsigned int num_iterations; unsigned int header_iterations; unsigned int debug; @@ -4046,14 +4043,7 @@ void check_tcc_offset(int model) switch (model) { case INTEL_FAM6_SKYLAKE_L: - case INTEL_FAM6_SKYLAKE: - case INTEL_FAM6_KABYLAKE_L: - case INTEL_FAM6_KABYLAKE: - case INTEL_FAM6_ICELAKE_L: - case INTEL_FAM6_ICELAKE: - case INTEL_FAM6_TIGERLAKE_L: - case INTEL_FAM6_TIGERLAKE: - case INTEL_FAM6_COMETLAKE: + case INTEL_FAM6_CANNONLAKE_L: if (!get_msr(base_cpu, MSR_PLATFORM_INFO, &msr)) { msr = (msr >> 30) & 1; if (msr) @@ -5573,10 +5563,9 @@ void process_cpuid() edx_flags & (1 << 22) ? "ACPI-TM" : "-", edx_flags & (1 << 28) ? "HT" : "-", edx_flags & (1 << 29) ? "TM" : "-"); } - if (genuine_intel) { - model_orig = model; + + if (genuine_intel) model = intel_model_duplicates(model); - } if (!(edx_flags & (1 << 5))) errx(1, "CPUID: no MSR"); @@ -5796,7 +5785,7 @@ void process_cpuid() automatic_cstate_conversion_probe(family, model); prewake_cstate_probe(family, model); - check_tcc_offset(model_orig); + check_tcc_offset(model); if (!quiet) dump_cstate_pstate_config_info(family, model); From 2c019d657968bdd93e11615e0919d8181a54742d Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 21 Aug 2022 22:39:16 +0800 Subject: [PATCH 005/299] tools/power/turbostat: Support alternative graphics sysfs knobs /sys/class/graphics/fb0/device/drm/card0/ and /sys/class/drm/card0/ point to the same device node. But in some cases, one exists and the other one does not. Prefer to use /sys/class/drm/card0/, and fall back to /sys/class/graphics/fb0/device/drm/card0/. This recovers the "GFXMHz" and "GFXAMHz" columns on some platforms like a SPR server. 
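As an illustration of the fallback pattern this patch applies (a simplified sketch using the same sysfs paths as the diff; error handling is reduced to returning NULL when neither node exists):

#include <stdio.h>

/* open the preferred drm node, fall back to the legacy graphics path */
static FILE *open_gfx_cur_freq(void)
{
        FILE *fp;

        fp = fopen("/sys/class/drm/card0/gt_cur_freq_mhz", "r");
        if (!fp)
                fp = fopen("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
        return fp;      /* NULL if neither sysfs node is present */
}

int main(void)
{
        FILE *fp = open_gfx_cur_freq();
        int mhz;

        if (fp && fscanf(fp, "%d", &mhz) == 1)
                printf("GFXMHz: %d\n", mhz);
        if (fp)
                fclose(fp);
        return 0;
}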
Reviewed-by: Rodrigo Vivi Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 30 ++++++++++++++++----------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index d7880870ef68..f9bc2230db73 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -3188,8 +3188,8 @@ int snapshot_gfx_rc6_ms(void) /* * snapshot_gfx_mhz() * - * record snapshot of - * /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz + * fall back to /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz + * when /sys/class/drm/card0/gt_cur_freq_mhz is not available. * * return 1 if config change requires a restart, else return 0 */ @@ -3198,9 +3198,11 @@ int snapshot_gfx_mhz(void) static FILE *fp; int retval; - if (fp == NULL) - fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r"); - else { + if (fp == NULL) { + fp = fopen("/sys/class/drm/card0/gt_cur_freq_mhz", "r"); + if (!fp) + fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r"); + } else { rewind(fp); fflush(fp); } @@ -3215,8 +3217,8 @@ int snapshot_gfx_mhz(void) /* * snapshot_gfx_cur_mhz() * - * record snapshot of - * /sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz + * fall back to /sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz + * when /sys/class/drm/card0/gt_act_freq_mhz is not available. * * return 1 if config change requires a restart, else return 0 */ @@ -3225,9 +3227,11 @@ int snapshot_gfx_act_mhz(void) static FILE *fp; int retval; - if (fp == NULL) - fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz", "r"); - else { + if (fp == NULL) { + fp = fopen("/sys/class/drm/card0/gt_act_freq_mhz", "r"); + if (!fp) + fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz", "r"); + } else { rewind(fp); fflush(fp); } @@ -5804,10 +5808,12 @@ void process_cpuid() if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK)) BIC_PRESENT(BIC_GFX_rc6); - if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK)) + if (!access("/sys/class/drm/card0/gt_cur_freq_mhz", R_OK) || + !access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK)) BIC_PRESENT(BIC_GFXMHz); - if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz", R_OK)) + if (!access("/sys/class/drm/card0/gt_act_freq_mhz", R_OK) || + !access("/sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz", R_OK)) BIC_PRESENT(BIC_GFXACTMHz); if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", R_OK)) From 6d306d6ec7e0f9a5e90f5afd70e3b6c32ae50ce6 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 24 Mar 2023 16:26:25 +0800 Subject: [PATCH 006/299] tools/power/turbostat: Replace raw value cpu model with Macro Kernel already has #define INTEL_FAM6_NEHALEM_G 0x1F /* Auburndale / Havendale */ Use standard Macro for CPU Model instead of raw value. 
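A short standalone sketch of the motivation (illustrative only; the macro definition is quoted from the commit message): a named macro from intel-family.h documents the model at the call site, where a bare hex literal does not.

#include <stdio.h>

/* from arch/x86/include/asm/intel-family.h */
#define INTEL_FAM6_NEHALEM_G 0x1F       /* Auburndale / Havendale */

static const char *model_name(unsigned int model)
{
        switch (model) {
        case INTEL_FAM6_NEHALEM_G:      /* was a bare 0x1F before this patch */
                return "Nehalem-G";
        default:
                return "unknown";
        }
}

int main(void)
{
        printf("%s\n", model_name(0x1F));
        return 0;
}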
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index f9bc2230db73..8d3a8af3692a 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -5413,7 +5413,7 @@ unsigned int intel_model_duplicates(unsigned int model) switch (model) { case INTEL_FAM6_NEHALEM_EP: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */ case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */ - case 0x1F: /* Core i7 and i5 Processor - Nehalem */ + case INTEL_FAM6_NEHALEM_G: /* Core i7 and i5 Processor - Nehalem */ case INTEL_FAM6_WESTMERE: /* Westmere Client - Clarkdale, Arrandale */ case INTEL_FAM6_WESTMERE_EP: /* Westmere EP - Gulftown */ return INTEL_FAM6_NEHALEM; From bbfc33b1e49f443296e56d8b76c77373f700aedc Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 24 Mar 2023 16:27:30 +0800 Subject: [PATCH 007/299] tools/power/turbostat: Remove redundant duplicates Remove redundant duplicates in intel_model_duplicates(). Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 8d3a8af3692a..2420300939da 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -5412,24 +5412,20 @@ unsigned int intel_model_duplicates(unsigned int model) switch (model) { case INTEL_FAM6_NEHALEM_EP: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */ - case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */ case INTEL_FAM6_NEHALEM_G: /* Core i7 and i5 Processor - Nehalem */ case INTEL_FAM6_WESTMERE: /* Westmere Client - Clarkdale, Arrandale */ case INTEL_FAM6_WESTMERE_EP: /* Westmere EP - Gulftown */ return INTEL_FAM6_NEHALEM; - case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */ case INTEL_FAM6_WESTMERE_EX: /* Westmere-EX Xeon - Eagleton */ return INTEL_FAM6_NEHALEM_EX; case INTEL_FAM6_XEON_PHI_KNM: return INTEL_FAM6_XEON_PHI_KNL; - case INTEL_FAM6_BROADWELL_X: case INTEL_FAM6_BROADWELL_D: /* BDX-DE */ return INTEL_FAM6_BROADWELL_X; - case INTEL_FAM6_SKYLAKE_L: case INTEL_FAM6_SKYLAKE: case INTEL_FAM6_KABYLAKE_L: case INTEL_FAM6_KABYLAKE: From 48674c1bb6124fe392e8fed80a39fcb3f62e6551 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Mon, 27 Mar 2023 12:36:55 +0800 Subject: [PATCH 008/299] tools/power/turbostat: Remove pseudo check for two models INTEL_FAM6_ATOM_SILVERMONT_MID/INTEL_FAM6_ATOM_AIRMONT_MID are not listed in probe_nhm_msrs(). This means that most of the turbostat features are not available on these two platforms. Further more, checking for these two models in has_slv_msrs() is dead code. Because has_slv_msrs() is called by the code guarded by probe_nhm_msrs(). For these two reasons, remove pseudo check for INTEL_FAM6_ATOM_SILVERMONT_MID and INTEL_FAM6_ATOM_AIRMONT_MID. Will add back the support when we can access these two platforms. 
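To illustrate why the removed cases were dead code, a simplified sketch assuming the same call structure as turbostat (illustrative model IDs only): callers only reach has_slv_msrs() for models that probe_nhm_msrs() accepts, and the MID models are not in that list, so matching them inside has_slv_msrs() can never happen.

#include <stdio.h>
#include <stdbool.h>

/* illustrative model IDs only */
enum { MODEL_SILVERMONT, MODEL_SILVERMONT_MID };

static bool probe_nhm_msrs(int model)
{
        return model == MODEL_SILVERMONT;       /* MID models are not listed */
}

static bool has_slv_msrs(int model)
{
        /* a MODEL_SILVERMONT_MID case here would be unreachable ... */
        return model == MODEL_SILVERMONT;
}

int main(void)
{
        int model = MODEL_SILVERMONT_MID;

        if (!probe_nhm_msrs(model))     /* ... because callers bail out here first */
                return 0;
        printf("has_slv_msrs: %d\n", has_slv_msrs(model));
        return 0;
}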
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 2420300939da..b8874d3dc83e 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -3792,8 +3792,6 @@ int has_slv_msrs(unsigned int family, unsigned int model) switch (model) { case INTEL_FAM6_ATOM_SILVERMONT: - case INTEL_FAM6_ATOM_SILVERMONT_MID: - case INTEL_FAM6_ATOM_AIRMONT_MID: return 1; } return 0; From 45232ab168a3c5abad86eafaef2beed8d7037666 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 21:52:53 +0800 Subject: [PATCH 009/299] tools/power/turbostat: Add skeleton support for table driven feature enumeration Turbostat supports a series of features that may diverge among different CPU models. Current code uses various of CPU model checks in different places to handle this, which makes the code hard to maintain. Add skeleton support for table driven feature enumeration to replace the current error-prone CPU model checks and global variables. Note: by comparing the CPU models with intel-family.h, it is found that turbostat support for below four Models are missing, including INTEL_FAM6_ICELAKE, INTEL_FAM6_ATOM_SILVERMONT_MID, INTEL_FAM6_ATOM_AIRMONT_MID and INTEL_FAM6_ATOM_AIRMONT_NP. Adding support for these models is a different work, thus it is not covered in this patch set. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 192 ++++++++++++++++++++++++++ 1 file changed, 192 insertions(+) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index b8874d3dc83e..4deea374188a 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -281,6 +281,197 @@ unsigned int has_misc_feature_control; unsigned int first_counter_read = 1; int ignore_stdin; +/* Model specific support Start */ + +/* List of features that may diverge among different platforms */ +struct platform_features { +}; + +struct platform_data { + unsigned int model; + const struct platform_features *features; +}; + +static const struct platform_features nhm_features = { +}; + +static const struct platform_features nhx_features = { +}; + +static const struct platform_features snb_features = { +}; + +static const struct platform_features snx_features = { +}; + +static const struct platform_features ivb_features = { +}; + +static const struct platform_features ivx_features = { +}; + +static const struct platform_features hsw_features = { +}; + +static const struct platform_features hsx_features = { +}; + +static const struct platform_features hswl_features = { +}; + +static const struct platform_features hswg_features = { +}; + +static const struct platform_features bdw_features = { +}; + +static const struct platform_features bdwg_features = { +}; + +static const struct platform_features bdx_features = { +}; + +static const struct platform_features skl_features = { +}; + +static const struct platform_features cnl_features = { +}; + +static const struct platform_features skx_features = { +}; + +static const struct platform_features icx_features = { +}; + +static const struct platform_features spr_features = { +}; + +static const struct platform_features slv_features = { +}; + +static const struct platform_features slvd_features = { +}; + +static const struct platform_features amt_features = { +}; + +static const struct 
platform_features gmt_features = { +}; + +static const struct platform_features gmtd_features = { +}; + +static const struct platform_features gmtp_features = { +}; + +static const struct platform_features tmt_features = { +}; + +static const struct platform_features tmtd_features = { +}; + +static const struct platform_features knl_features = { +}; + +static const struct platform_features default_features = { +}; + +static const struct platform_features amd_features = { +}; + +static const struct platform_data turbostat_pdata[] = { + { INTEL_FAM6_NEHALEM, &nhm_features }, + { INTEL_FAM6_NEHALEM_G, &nhm_features }, + { INTEL_FAM6_NEHALEM_EP, &nhm_features }, + { INTEL_FAM6_NEHALEM_EX, &nhx_features }, + { INTEL_FAM6_WESTMERE, &nhm_features }, + { INTEL_FAM6_WESTMERE_EP, &nhm_features }, + { INTEL_FAM6_WESTMERE_EX, &nhx_features }, + { INTEL_FAM6_SANDYBRIDGE, &snb_features }, + { INTEL_FAM6_SANDYBRIDGE_X, &snx_features }, + { INTEL_FAM6_IVYBRIDGE, &ivb_features }, + { INTEL_FAM6_IVYBRIDGE_X, &ivx_features }, + { INTEL_FAM6_HASWELL, &hsw_features }, + { INTEL_FAM6_HASWELL_X, &hsx_features }, + { INTEL_FAM6_HASWELL_L, &hswl_features }, + { INTEL_FAM6_HASWELL_G, &hswg_features }, + { INTEL_FAM6_BROADWELL, &bdw_features }, + { INTEL_FAM6_BROADWELL_G, &bdwg_features }, + { INTEL_FAM6_BROADWELL_X, &bdx_features }, + { INTEL_FAM6_BROADWELL_D, &bdx_features }, + { INTEL_FAM6_SKYLAKE_L, &skl_features }, + { INTEL_FAM6_SKYLAKE, &skl_features }, + { INTEL_FAM6_SKYLAKE_X, &skx_features }, + { INTEL_FAM6_KABYLAKE_L, &skl_features }, + { INTEL_FAM6_KABYLAKE, &skl_features }, + { INTEL_FAM6_COMETLAKE, &skl_features }, + { INTEL_FAM6_COMETLAKE_L, &skl_features }, + { INTEL_FAM6_CANNONLAKE_L, &cnl_features }, + { INTEL_FAM6_ICELAKE_X, &icx_features }, + { INTEL_FAM6_ICELAKE_D, &icx_features }, + { INTEL_FAM6_ICELAKE_L, &cnl_features }, + { INTEL_FAM6_ICELAKE_NNPI, &cnl_features }, + { INTEL_FAM6_ROCKETLAKE, &cnl_features }, + { INTEL_FAM6_TIGERLAKE_L, &cnl_features }, + { INTEL_FAM6_TIGERLAKE, &cnl_features }, + { INTEL_FAM6_SAPPHIRERAPIDS_X, &spr_features }, + { INTEL_FAM6_EMERALDRAPIDS_X, &spr_features }, + { INTEL_FAM6_LAKEFIELD, &cnl_features }, + { INTEL_FAM6_ALDERLAKE, &cnl_features }, + { INTEL_FAM6_ALDERLAKE_L, &cnl_features }, + { INTEL_FAM6_RAPTORLAKE, &cnl_features }, + { INTEL_FAM6_RAPTORLAKE_P, &cnl_features }, + { INTEL_FAM6_RAPTORLAKE_S, &cnl_features }, + { INTEL_FAM6_METEORLAKE, &cnl_features }, + { INTEL_FAM6_METEORLAKE_L, &cnl_features }, + { INTEL_FAM6_ATOM_SILVERMONT, &slv_features }, + { INTEL_FAM6_ATOM_SILVERMONT_D, &slvd_features }, + { INTEL_FAM6_ATOM_AIRMONT, &amt_features }, + { INTEL_FAM6_ATOM_GOLDMONT, &gmt_features }, + { INTEL_FAM6_ATOM_GOLDMONT_D, &gmtd_features }, + { INTEL_FAM6_ATOM_GOLDMONT_PLUS, &gmtp_features }, + { INTEL_FAM6_ATOM_TREMONT_D, &tmtd_features }, + { INTEL_FAM6_ATOM_TREMONT, &tmt_features }, + { INTEL_FAM6_ATOM_TREMONT_L, &tmt_features }, + { INTEL_FAM6_ATOM_GRACEMONT, &cnl_features }, + { INTEL_FAM6_XEON_PHI_KNL, &knl_features }, + { INTEL_FAM6_XEON_PHI_KNM, &knl_features }, + /* + * Missing support for + * INTEL_FAM6_ICELAKE + * INTEL_FAM6_ATOM_SILVERMONT_MID + * INTEL_FAM6_ATOM_AIRMONT_MID + * INTEL_FAM6_ATOM_AIRMONT_NP + */ + { 0, NULL }, +}; + +static const struct platform_features *platform; + +void probe_platform_features(unsigned int family, unsigned int model) +{ + int i; + + if (authentic_amd || hygon_genuine) { + platform = &amd_features; + return; + } + + platform = &default_features; + + if (!genuine_intel || family != 6) + return; + + for (i = 
0; turbostat_pdata[i].features; i++) { + if (turbostat_pdata[i].model == model) { + platform = turbostat_pdata[i].features; + return; + } + } +} + +/* Model specific support End */ + #define RAPL_PKG (1 << 0) /* 0x610 MSR_PKG_POWER_LIMIT */ /* 0x611 MSR_PKG_ENERGY_STATUS */ @@ -5562,6 +5753,7 @@ void process_cpuid() edx_flags & (1 << 28) ? "HT" : "-", edx_flags & (1 << 29) ? "TM" : "-"); } + probe_platform_features(family, model); if (genuine_intel) model = intel_model_duplicates(model); From 778fc34a7a3db2811e28ce570318dd047f278cb2 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Mon, 21 Aug 2023 15:26:32 +0800 Subject: [PATCH 010/299] tools/power/turbostat: Abstract MSR_MISC_FEATURE_CONTROL support Abstract MSR_MISC_FEATURE_CONTROL support. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 4deea374188a..7eaa0adf72e0 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -277,7 +277,6 @@ unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */ unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */ unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */ unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ -unsigned int has_misc_feature_control; unsigned int first_counter_read = 1; int ignore_stdin; @@ -285,6 +284,7 @@ int ignore_stdin; /* List of features that may diverge among different platforms */ struct platform_features { + bool has_msr_misc_feature_control; /* MSR_MISC_FEATURE_CONTROL */ }; struct platform_data { @@ -299,51 +299,67 @@ static const struct platform_features nhx_features = { }; static const struct platform_features snb_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features snx_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features ivb_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features ivx_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features hsw_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features hsx_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features hswl_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features hswg_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features bdw_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features bdwg_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features bdx_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features skl_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features cnl_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features skx_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features icx_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features spr_features = { + .has_msr_misc_feature_control = 1, }; static const struct platform_features slv_features = { @@ -3883,7 +3899,6 @@ void check_permissions(void) * * Side effect: * sets global pkg_cstate_limit to decode MSR_PKG_CST_CONFIG_CONTROL - * sets has_misc_feature_control */ 
int probe_nhm_msrs(unsigned int family, unsigned int model) { @@ -3909,7 +3924,6 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_IVYBRIDGE: /* IVB */ case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ pkg_cstate_limits = snb_pkg_cstate_limits; - has_misc_feature_control = 1; break; case INTEL_FAM6_HASWELL: /* HSW */ case INTEL_FAM6_HASWELL_G: /* HSW */ @@ -3921,16 +3935,13 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_SKYLAKE_L: /* SKL */ case INTEL_FAM6_CANNONLAKE_L: /* CNL */ pkg_cstate_limits = hsw_pkg_cstate_limits; - has_misc_feature_control = 1; break; case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */ pkg_cstate_limits = skx_pkg_cstate_limits; - has_misc_feature_control = 1; break; case INTEL_FAM6_ICELAKE_X: /* ICX */ pkg_cstate_limits = icx_pkg_cstate_limits; - has_misc_feature_control = 1; break; case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ no_MSR_MISC_PWR_MGMT = 1; @@ -5541,7 +5552,7 @@ void decode_misc_feature_control(void) { unsigned long long msr; - if (!has_misc_feature_control) + if (!platform->has_msr_misc_feature_control) return; if (!get_msr(base_cpu, MSR_MISC_FEATURE_CONTROL, &msr)) From 3dd0e7547d11e770bafb40ad41f2631cc4b16649 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Mon, 21 Aug 2023 22:12:57 +0800 Subject: [PATCH 011/299] tools/power/turbostat: Abstract MSR_MISC_PWR_MGMT support Abstract MSR_MISC_PWR_MGMT support. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 31 +++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 7eaa0adf72e0..9507f310e212 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -238,7 +238,6 @@ unsigned int hygon_genuine; unsigned int max_level, max_extended_level; unsigned int has_invariant_tsc; unsigned int do_nhm_platform_info; -unsigned int no_MSR_MISC_PWR_MGMT; unsigned int aperf_mperf_multiplier = 1; double bclk; double base_hz; @@ -285,6 +284,7 @@ int ignore_stdin; /* List of features that may diverge among different platforms */ struct platform_features { bool has_msr_misc_feature_control; /* MSR_MISC_FEATURE_CONTROL */ + bool has_msr_misc_pwr_mgmt; /* MSR_MISC_PWR_MGMT */ }; struct platform_data { @@ -293,100 +293,125 @@ struct platform_data { }; static const struct platform_features nhm_features = { + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features nhx_features = { + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features snb_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features snx_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features ivb_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features ivx_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features hsw_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features hsx_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features hswl_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features hswg_features = { .has_msr_misc_feature_control 
= 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features bdw_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features bdwg_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features bdx_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features skl_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features cnl_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features skx_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features icx_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features spr_features = { .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features slv_features = { }; static const struct platform_features slvd_features = { + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features amt_features = { }; static const struct platform_features gmt_features = { + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features gmtd_features = { + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features gmtp_features = { + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features tmt_features = { + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features tmtd_features = { + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features knl_features = { + .has_msr_misc_pwr_mgmt = 1, }; static const struct platform_features default_features = { @@ -3944,14 +3969,12 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) pkg_cstate_limits = icx_pkg_cstate_limits; break; case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ - no_MSR_MISC_PWR_MGMT = 1; /* FALLTHRU */ case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */ pkg_cstate_limits = slv_pkg_cstate_limits; break; case INTEL_FAM6_ATOM_AIRMONT: /* AMT */ pkg_cstate_limits = amt_pkg_cstate_limits; - no_MSR_MISC_PWR_MGMT = 1; break; case INTEL_FAM6_XEON_PHI_KNL: /* PHI */ pkg_cstate_limits = phi_pkg_cstate_limits; @@ -5576,7 +5599,7 @@ void decode_misc_pwr_mgmt_msr(void) if (!do_nhm_platform_info) return; - if (no_MSR_MISC_PWR_MGMT) + if (!platform->has_msr_misc_pwr_mgmt) return; if (!get_msr(base_cpu, MSR_MISC_PWR_MGMT, &msr)) From 71e841293c715797d8c6ae8cdc3f74b4396c5570 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Mon, 21 Aug 2023 22:22:48 +0800 Subject: [PATCH 012/299] tools/power/turbostat: Abstract BCLK frequency support Abstract CPU base clock frequency support. Note that bclk is used by 1. calculate base_hz using MSR_PLATFORM_INFO, which is guarded by probe_nhm_msrs(). 2. dump MSR_PLATFORM_INFO and Turbo Ratio Limit MSRs, which are also guarded by probe_nhm_msrs(). Thus probe_bclk() works for probe_nhm_msrs() models only. 
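A worked example of the base_hz computation that probe_bclk() performs (a sketch; the base-ratio value of 24 is made up for illustration): with bclk_freq = BCLK_100MHZ, bclk is 100.00 MHz, and if MSR_PLATFORM_INFO bits 15:8 report a base ratio of 24, base_hz = 24 * 100.00 MHz = 2.4 GHz.

#include <stdio.h>

int main(void)
{
        double bclk = 100.00;           /* BCLK_100MHZ platform */
        unsigned int base_ratio = 24;   /* MSR_PLATFORM_INFO[15:8], example value */
        double base_hz = base_ratio * bclk * 1000000;

        printf("base frequency: %.0f Hz (%.2f GHz)\n", base_hz, base_hz / 1e9);
        return 0;
}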
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 131 +++++++++++++++++--------- 1 file changed, 87 insertions(+), 44 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 9507f310e212..66ba70017d53 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -269,7 +269,6 @@ unsigned int do_ring_perf_limit_reasons; unsigned int crystal_hz; unsigned long long tsc_hz; int base_cpu; -double discover_bclk(unsigned int family, unsigned int model); unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */ /* IA32_HWP_REQUEST, IA32_HWP_STATUS */ unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */ @@ -279,12 +278,15 @@ unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ unsigned int first_counter_read = 1; int ignore_stdin; +int get_msr(int cpu, off_t offset, unsigned long long *msr); + /* Model specific support Start */ /* List of features that may diverge among different platforms */ struct platform_features { bool has_msr_misc_feature_control; /* MSR_MISC_FEATURE_CONTROL */ bool has_msr_misc_pwr_mgmt; /* MSR_MISC_PWR_MGMT */ + int bclk_freq; /* CPU base clock */ }; struct platform_data { @@ -292,126 +294,185 @@ struct platform_data { const struct platform_features *features; }; +/* For BCLK */ +enum bclk_freq { + BCLK_100MHZ = 1, + BCLK_133MHZ, + BCLK_SLV, +}; + +#define SLM_BCLK_FREQS 5 +double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0 }; + +double slm_bclk(void) +{ + unsigned long long msr = 3; + unsigned int i; + double freq; + + if (get_msr(base_cpu, MSR_FSB_FREQ, &msr)) + fprintf(outf, "SLM BCLK: unknown\n"); + + i = msr & 0xf; + if (i >= SLM_BCLK_FREQS) { + fprintf(outf, "SLM BCLK[%d] invalid\n", i); + i = 3; + } + freq = slm_freq_table[i]; + + if (!quiet) + fprintf(outf, "SLM BCLK: %.1f Mhz\n", freq); + + return freq; +} + static const struct platform_features nhm_features = { .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_133MHZ, }; static const struct platform_features nhx_features = { .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_133MHZ, }; static const struct platform_features snb_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features snx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features ivb_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features ivx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features hsw_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features hsx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features hswl_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features hswg_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features bdw_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features bdwg_features = { 
.has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features bdx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features skl_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features cnl_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features skx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features icx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features spr_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features slv_features = { + .bclk_freq = BCLK_SLV, }; static const struct platform_features slvd_features = { .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_SLV, }; static const struct platform_features amt_features = { + .bclk_freq = BCLK_133MHZ, }; static const struct platform_features gmt_features = { .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features gmtd_features = { .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features gmtp_features = { .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features tmt_features = { .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features tmtd_features = { .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features knl_features = { .has_msr_misc_pwr_mgmt = 1, + .bclk_freq = BCLK_100MHZ, }; static const struct platform_features default_features = { @@ -3907,6 +3968,30 @@ void check_permissions(void) exit(-6); } +void probe_bclk(void) +{ + unsigned long long msr; + unsigned int base_ratio; + + if (!do_nhm_platform_info) + return; + + if (platform->bclk_freq == BCLK_100MHZ) + bclk = 100.00; + else if (platform->bclk_freq == BCLK_133MHZ) + bclk = 133.33; + else if (platform->bclk_freq == BCLK_SLV) + bclk = slm_bclk(); + else + return; + + get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); + base_ratio = (msr >> 8) & 0xFF; + + base_hz = base_ratio * bclk * 1000000; + has_base_hz = 1; +} + /* * NHM adds support for additional MSRs: * @@ -3928,7 +4013,6 @@ void check_permissions(void) int probe_nhm_msrs(unsigned int family, unsigned int model) { unsigned long long msr; - unsigned int base_ratio; int *pkg_cstate_limits; if (!genuine_intel) @@ -3937,8 +4021,6 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) if (family != 6) return 0; - bclk = discover_bclk(family, model); - switch (model) { case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */ case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */ @@ -3992,11 +4074,6 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr); pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; - get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); - base_ratio = (msr >> 8) & 0xFF; - - base_hz = base_ratio * bclk * 1000000; - has_base_hz = 1; return 1; } @@ -5403,41 +5480,6 @@ unsigned int get_aperf_mperf_multiplier(unsigned int family, 
unsigned int model) return 1; } -#define SLM_BCLK_FREQS 5 -double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0 }; - -double slm_bclk(void) -{ - unsigned long long msr = 3; - unsigned int i; - double freq; - - if (get_msr(base_cpu, MSR_FSB_FREQ, &msr)) - fprintf(outf, "SLM BCLK: unknown\n"); - - i = msr & 0xf; - if (i >= SLM_BCLK_FREQS) { - fprintf(outf, "SLM BCLK[%d] invalid\n", i); - i = 3; - } - freq = slm_freq_table[i]; - - if (!quiet) - fprintf(outf, "SLM BCLK: %.1f Mhz\n", freq); - - return freq; -} - -double discover_bclk(unsigned int family, unsigned int model) -{ - if (has_snb_msrs(family, model) || is_knl(family, model)) - return 100.00; - else if (is_slm(family, model)) - return slm_bclk(); - else - return 133.33; -} - int get_cpu_type(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned int eax, ebx, ecx, edx; @@ -5929,6 +5971,7 @@ void process_cpuid() BIC_PRESENT(BIC_CPU_c6); BIC_PRESENT(BIC_SMI); } + probe_bclk(); do_snb_cstates = has_snb_msrs(family, model); if (do_snb_cstates) From 3989fc890782c8002477895e9f24ffb98a132293 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 22:37:06 +0800 Subject: [PATCH 013/299] tools/power/turbostat: Abstract Package cstate limit decoding support Abstract the support for decoding package cstate limit from MSR_PKG_CST_CONFIG_CONTROL. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 112 ++++++++++++++++++++------ 1 file changed, 86 insertions(+), 26 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 66ba70017d53..bcad9332a3b2 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -287,6 +287,7 @@ struct platform_features { bool has_msr_misc_feature_control; /* MSR_MISC_FEATURE_CONTROL */ bool has_msr_misc_pwr_mgmt; /* MSR_MISC_PWR_MGMT */ int bclk_freq; /* CPU base clock */ + int cst_limit; /* MSR_PKG_CST_CONFIG_CONTROL */ }; struct platform_data { @@ -326,153 +327,193 @@ double slm_bclk(void) return freq; } +/* For Package cstate limit */ +enum package_cstate_limit { + CST_LIMIT_NHM = 1, + CST_LIMIT_SNB, + CST_LIMIT_HSW, + CST_LIMIT_SKX, + CST_LIMIT_ICX, + CST_LIMIT_SLV, + CST_LIMIT_AMT, + CST_LIMIT_KNL, + CST_LIMIT_GMT, +}; + static const struct platform_features nhm_features = { .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_133MHZ, + .cst_limit = CST_LIMIT_NHM, }; static const struct platform_features nhx_features = { .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_133MHZ, + .cst_limit = CST_LIMIT_NHM, }; static const struct platform_features snb_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_SNB, }; static const struct platform_features snx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_SNB, }; static const struct platform_features ivb_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_SNB, }; static const struct platform_features ivx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_SNB, }; static const struct platform_features hsw_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_HSW, }; static const struct platform_features hsx_features = { 
.has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_HSW, }; static const struct platform_features hswl_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_HSW, }; static const struct platform_features hswg_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_HSW, }; static const struct platform_features bdw_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_HSW, }; static const struct platform_features bdwg_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_HSW, }; static const struct platform_features bdx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_HSW, }; static const struct platform_features skl_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_HSW, }; static const struct platform_features cnl_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_HSW, }; static const struct platform_features skx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_SKX, }; static const struct platform_features icx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_ICX, }; static const struct platform_features spr_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_SKX, }; static const struct platform_features slv_features = { .bclk_freq = BCLK_SLV, + .cst_limit = CST_LIMIT_SLV, }; static const struct platform_features slvd_features = { .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_SLV, + .cst_limit = CST_LIMIT_SLV, }; static const struct platform_features amt_features = { .bclk_freq = BCLK_133MHZ, + .cst_limit = CST_LIMIT_AMT, }; static const struct platform_features gmt_features = { .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_GMT, }; static const struct platform_features gmtd_features = { .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_GMT, }; static const struct platform_features gmtp_features = { .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_GMT, }; static const struct platform_features tmt_features = { .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_GMT, }; static const struct platform_features tmtd_features = { .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_GMT, }; static const struct platform_features knl_features = { .has_msr_misc_pwr_mgmt = 1, .bclk_freq = BCLK_100MHZ, + .cst_limit = CST_LIMIT_KNL, }; static const struct platform_features default_features = { @@ -2704,6 +2745,50 @@ int icx_pkg_cstate_limits[16] = PCLRSV, PCLRSV }; +void probe_cst_limit(void) +{ + unsigned long long msr; + int *pkg_cstate_limits; + + if (!do_nhm_platform_info) + return; + + switch (platform->cst_limit) { + case CST_LIMIT_NHM: + pkg_cstate_limits = nhm_pkg_cstate_limits; + break; + case CST_LIMIT_SNB: + pkg_cstate_limits = snb_pkg_cstate_limits; + 
break; + case CST_LIMIT_HSW: + pkg_cstate_limits = hsw_pkg_cstate_limits; + break; + case CST_LIMIT_SKX: + pkg_cstate_limits = skx_pkg_cstate_limits; + break; + case CST_LIMIT_ICX: + pkg_cstate_limits = icx_pkg_cstate_limits; + break; + case CST_LIMIT_SLV: + pkg_cstate_limits = slv_pkg_cstate_limits; + break; + case CST_LIMIT_AMT: + pkg_cstate_limits = amt_pkg_cstate_limits; + break; + case CST_LIMIT_KNL: + pkg_cstate_limits = phi_pkg_cstate_limits; + break; + case CST_LIMIT_GMT: + pkg_cstate_limits = glm_pkg_cstate_limits; + break; + default: + return; + } + + get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr); + pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; +} + static void calculate_tsc_tweak() { tsc_tweak = base_hz / tsc_hz; @@ -4006,15 +4091,9 @@ void probe_bclk(void) * MSR_PKG_C6_RESIDENCY 0x000003f9 * MSR_CORE_C3_RESIDENCY 0x000003fc * MSR_CORE_C6_RESIDENCY 0x000003fd - * - * Side effect: - * sets global pkg_cstate_limit to decode MSR_PKG_CST_CONFIG_CONTROL */ int probe_nhm_msrs(unsigned int family, unsigned int model) { - unsigned long long msr; - int *pkg_cstate_limits; - if (!genuine_intel) return 0; @@ -4024,14 +4103,10 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) switch (model) { case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */ case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */ - pkg_cstate_limits = nhm_pkg_cstate_limits; - break; case INTEL_FAM6_SANDYBRIDGE: /* SNB */ case INTEL_FAM6_SANDYBRIDGE_X: /* SNB Xeon */ case INTEL_FAM6_IVYBRIDGE: /* IVB */ case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ - pkg_cstate_limits = snb_pkg_cstate_limits; - break; case INTEL_FAM6_HASWELL: /* HSW */ case INTEL_FAM6_HASWELL_G: /* HSW */ case INTEL_FAM6_HASWELL_X: /* HSX */ @@ -4041,38 +4116,22 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_BROADWELL_X: /* BDX */ case INTEL_FAM6_SKYLAKE_L: /* SKL */ case INTEL_FAM6_CANNONLAKE_L: /* CNL */ - pkg_cstate_limits = hsw_pkg_cstate_limits; - break; case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */ - pkg_cstate_limits = skx_pkg_cstate_limits; - break; case INTEL_FAM6_ICELAKE_X: /* ICX */ - pkg_cstate_limits = icx_pkg_cstate_limits; - break; case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ - /* FALLTHRU */ case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */ - pkg_cstate_limits = slv_pkg_cstate_limits; - break; case INTEL_FAM6_ATOM_AIRMONT: /* AMT */ - pkg_cstate_limits = amt_pkg_cstate_limits; - break; case INTEL_FAM6_XEON_PHI_KNL: /* PHI */ - pkg_cstate_limits = phi_pkg_cstate_limits; - break; case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ case INTEL_FAM6_ATOM_TREMONT: /* EHL */ case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */ - pkg_cstate_limits = glm_pkg_cstate_limits; break; default: return 0; } - get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr); - pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; return 1; } @@ -5964,6 +6023,7 @@ void process_cpuid() BIC_PRESENT(BIC_IRQ); BIC_PRESENT(BIC_TSC_MHz); + probe_cst_limit(); if (probe_nhm_msrs(family, model)) { do_nhm_platform_info = 1; BIC_PRESENT(BIC_CPU_c1); From fcfa1ce074ab76272639961d4d7900b91657a8d5 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 25 Aug 2023 16:52:23 +0800 Subject: [PATCH 014/299] tools/power/turbostat: Abstract Nehalem MSRs support MSR_PLATFORM_INFO, MSR_IA32_TEMPERATURE_TARGET, MSR_SMI_COUNT, MSR_PKG_CST_CONFIG_CONTROL, and the TRL MSRs are always available for platforms since Nehalem. 
Support for these msrs can be described altogether. Abstract the support for these MSRs. Delete probe_nhm_msrs() CPU model check. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 101 +++++++++----------------- 1 file changed, 34 insertions(+), 67 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index bcad9332a3b2..bc221e800aec 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -237,7 +237,6 @@ unsigned int authentic_amd; unsigned int hygon_genuine; unsigned int max_level, max_extended_level; unsigned int has_invariant_tsc; -unsigned int do_nhm_platform_info; unsigned int aperf_mperf_multiplier = 1; double bclk; double base_hz; @@ -286,6 +285,7 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr); struct platform_features { bool has_msr_misc_feature_control; /* MSR_MISC_FEATURE_CONTROL */ bool has_msr_misc_pwr_mgmt; /* MSR_MISC_PWR_MGMT */ + bool has_nhm_msrs; /* MSR_PLATFORM_INFO, MSR_IA32_TEMPERATURE_TARGET, MSR_SMI_COUNT, MSR_PKG_CST_CONFIG_CONTROL, TRL MSRs */ int bclk_freq; /* CPU base clock */ int cst_limit; /* MSR_PKG_CST_CONFIG_CONTROL */ }; @@ -342,12 +342,14 @@ enum package_cstate_limit { static const struct platform_features nhm_features = { .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_133MHZ, .cst_limit = CST_LIMIT_NHM, }; static const struct platform_features nhx_features = { .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_133MHZ, .cst_limit = CST_LIMIT_NHM, }; @@ -355,6 +357,7 @@ static const struct platform_features nhx_features = { static const struct platform_features snb_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SNB, }; @@ -362,6 +365,7 @@ static const struct platform_features snb_features = { static const struct platform_features snx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SNB, }; @@ -369,6 +373,7 @@ static const struct platform_features snx_features = { static const struct platform_features ivb_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SNB, }; @@ -376,6 +381,7 @@ static const struct platform_features ivb_features = { static const struct platform_features ivx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SNB, }; @@ -383,6 +389,7 @@ static const struct platform_features ivx_features = { static const struct platform_features hsw_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, }; @@ -390,6 +397,7 @@ static const struct platform_features hsw_features = { static const struct platform_features hsx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, }; @@ -397,6 +405,7 @@ static const struct platform_features hsx_features = { static const struct platform_features hswl_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, }; @@ -404,6 +413,7 @@ static const struct platform_features 
hswl_features = { static const struct platform_features hswg_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, }; @@ -411,6 +421,7 @@ static const struct platform_features hswg_features = { static const struct platform_features bdw_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, }; @@ -418,6 +429,7 @@ static const struct platform_features bdw_features = { static const struct platform_features bdwg_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, }; @@ -425,6 +437,7 @@ static const struct platform_features bdwg_features = { static const struct platform_features bdx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, }; @@ -432,6 +445,7 @@ static const struct platform_features bdx_features = { static const struct platform_features skl_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, }; @@ -439,6 +453,7 @@ static const struct platform_features skl_features = { static const struct platform_features cnl_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, }; @@ -446,6 +461,7 @@ static const struct platform_features cnl_features = { static const struct platform_features skx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SKX, }; @@ -453,6 +469,7 @@ static const struct platform_features skx_features = { static const struct platform_features icx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_ICX, }; @@ -460,58 +477,68 @@ static const struct platform_features icx_features = { static const struct platform_features spr_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SKX, }; static const struct platform_features slv_features = { + .has_nhm_msrs = 1, .bclk_freq = BCLK_SLV, .cst_limit = CST_LIMIT_SLV, }; static const struct platform_features slvd_features = { .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_SLV, .cst_limit = CST_LIMIT_SLV, }; static const struct platform_features amt_features = { + .has_nhm_msrs = 1, .bclk_freq = BCLK_133MHZ, .cst_limit = CST_LIMIT_AMT, }; static const struct platform_features gmt_features = { .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_GMT, }; static const struct platform_features gmtd_features = { .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_GMT, }; static const struct platform_features gmtp_features = { .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_GMT, }; static const struct platform_features tmt_features = { .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_GMT, }; static const struct platform_features tmtd_features = { .has_msr_misc_pwr_mgmt = 1, + 
.has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_GMT, }; static const struct platform_features knl_features = { .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_KNL, }; @@ -2750,7 +2777,7 @@ void probe_cst_limit(void) unsigned long long msr; int *pkg_cstate_limits; - if (!do_nhm_platform_info) + if (!platform->has_nhm_msrs) return; switch (platform->cst_limit) { @@ -4058,7 +4085,7 @@ void probe_bclk(void) unsigned long long msr; unsigned int base_ratio; - if (!do_nhm_platform_info) + if (!platform->has_nhm_msrs) return; if (platform->bclk_freq == BCLK_100MHZ) @@ -4077,65 +4104,6 @@ void probe_bclk(void) has_base_hz = 1; } -/* - * NHM adds support for additional MSRs: - * - * MSR_SMI_COUNT 0x00000034 - * - * MSR_PLATFORM_INFO 0x000000ce - * MSR_PKG_CST_CONFIG_CONTROL 0x000000e2 - * - * MSR_MISC_PWR_MGMT 0x000001aa - * - * MSR_PKG_C3_RESIDENCY 0x000003f8 - * MSR_PKG_C6_RESIDENCY 0x000003f9 - * MSR_CORE_C3_RESIDENCY 0x000003fc - * MSR_CORE_C6_RESIDENCY 0x000003fd - */ -int probe_nhm_msrs(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */ - case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */ - case INTEL_FAM6_SANDYBRIDGE: /* SNB */ - case INTEL_FAM6_SANDYBRIDGE_X: /* SNB Xeon */ - case INTEL_FAM6_IVYBRIDGE: /* IVB */ - case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ - case INTEL_FAM6_HASWELL: /* HSW */ - case INTEL_FAM6_HASWELL_G: /* HSW */ - case INTEL_FAM6_HASWELL_X: /* HSX */ - case INTEL_FAM6_HASWELL_L: /* HSW */ - case INTEL_FAM6_BROADWELL: /* BDW */ - case INTEL_FAM6_BROADWELL_G: /* BDW */ - case INTEL_FAM6_BROADWELL_X: /* BDX */ - case INTEL_FAM6_SKYLAKE_L: /* SKL */ - case INTEL_FAM6_CANNONLAKE_L: /* CNL */ - case INTEL_FAM6_SKYLAKE_X: /* SKX */ - case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */ - case INTEL_FAM6_ICELAKE_X: /* ICX */ - case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ - case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */ - case INTEL_FAM6_ATOM_AIRMONT: /* AMT */ - case INTEL_FAM6_XEON_PHI_KNL: /* PHI */ - case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ - case INTEL_FAM6_ATOM_GOLDMONT_PLUS: - case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ - case INTEL_FAM6_ATOM_TREMONT: /* EHL */ - case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */ - break; - default: - return 0; - } - - return 1; -} - /* * SLV client has support for unique MSRs: * @@ -4461,7 +4429,7 @@ static void dump_turbo_ratio_info(unsigned int family, unsigned int model) static void dump_cstate_pstate_config_info(unsigned int family, unsigned int model) { - if (!do_nhm_platform_info) + if (!platform->has_nhm_msrs) return; dump_nhm_platform_info(); @@ -5606,7 +5574,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk } /* Temperature Target MSR is Nehalem and newer only */ - if (!do_nhm_platform_info) + if (!platform->has_nhm_msrs) goto guess; if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr)) @@ -5697,7 +5665,7 @@ void decode_misc_pwr_mgmt_msr(void) { unsigned long long msr; - if (!do_nhm_platform_info) + if (!platform->has_nhm_msrs) return; if (!platform->has_msr_misc_pwr_mgmt) @@ -6024,8 +5992,7 @@ void process_cpuid() BIC_PRESENT(BIC_TSC_MHz); probe_cst_limit(); - if (probe_nhm_msrs(family, model)) { - do_nhm_platform_info = 1; + if (platform->has_nhm_msrs) { BIC_PRESENT(BIC_CPU_c1); BIC_PRESENT(BIC_CPU_c3); BIC_PRESENT(BIC_CPU_c6); From 
c2c25e85df316a624e4a8ee85d65ea29265f486d Mon Sep 17 00:00:00 2001
From: Zhang Rui
Date: Mon, 27 Mar 2023 14:57:06 +0800
Subject: [PATCH 015/299] tools/power/turbostat: Remove a redundant check

Platforms with has_msr_misc_pwr_mgmt set are a subset of platforms with
has_nhm_msrs set. Thus remove the redundant check for
platform->has_nhm_msrs.

Signed-off-by: Zhang Rui
Reviewed-by: Len Brown
---
 tools/power/x86/turbostat/turbostat.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index bc221e800aec..4eb10491f714 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -5665,9 +5665,6 @@ void decode_misc_pwr_mgmt_msr(void)
 {
 	unsigned long long msr;

-	if (!platform->has_nhm_msrs)
-		return;
-
 	if (!platform->has_msr_misc_pwr_mgmt)
 		return;

From 8b7199c0855e3b22532a21aaaed78e699431e4e5 Mon Sep 17 00:00:00 2001
From: Zhang Rui
Date: Mon, 27 Mar 2023 14:58:35 +0800
Subject: [PATCH 016/299] tools/power/turbostat: Rename some functions

Rename dump_nhm_platform_info() and dump_nhm_cst_cfg() to
dump_platform_info() and dump_cst_cfg() because these MSRs' behavior is
consistent when they're available.

Signed-off-by: Zhang Rui
Reviewed-by: Len Brown
---
 tools/power/x86/turbostat/turbostat.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 4eb10491f714..654ae1ce130c 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -2823,7 +2823,7 @@ static void calculate_tsc_tweak()

 void prewake_cstate_probe(unsigned int family, unsigned int model);

-static void dump_nhm_platform_info(void)
+static void dump_platform_info(void)
 {
 	unsigned long long msr;
 	unsigned int ratio;
@@ -3059,7 +3059,7 @@ static void dump_knl_turbo_ratio_limits(void)
 		ratio[i], bclk, ratio[i] * bclk, cores[i]);
 }

-static void dump_nhm_cst_cfg(void)
+static void dump_cst_cfg(void)
 {
 	unsigned long long msr;

@@ -4432,9 +4432,9 @@ static void dump_cstate_pstate_config_info(unsigned int family, unsigned int mod
 	if (!platform->has_nhm_msrs)
 		return;

-	dump_nhm_platform_info();
+	dump_platform_info();
 	dump_turbo_ratio_info(family, model);
-	dump_nhm_cst_cfg();
+	dump_cst_cfg();
 }

 static int read_sysfs_int(char *path)

From 10d85d85ab4f3ae7faca4450ce7b0a166fd396f0 Mon Sep 17 00:00:00 2001
From: Zhang Rui
Date: Fri, 8 Sep 2023 23:16:00 +0800
Subject: [PATCH 017/299] tools/power/turbostat: Abstract Turbo Ratio Limit MSRs support

Abstract the support for MSR_TURBO_RATIO_LIMIT, MSR_TURBO_RATIO_LIMIT1,
MSR_TURBO_RATIO_LIMIT2, MSR_SECONDARY_TURBO_RATIO_LIMIT,
MSR_ATOM_CORE_RATIOS and MSR_ATOM_CORE_TURBO_RATIOS.

Delete has_turbo_ratio_group_limits(), has_turbo_ratio_limit(),
has_atom_turbo_ratio_limit(), has_ivt_turbo_ratio_limit(),
has_hsw_turbo_ratio_limit(), has_knl_turbo_ratio_limit() and
has_glm_turbo_ratio_limit() CPU model checks.
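As an illustration of the bitmask approach, here is a minimal user-space sketch
(this is not the turbostat code itself; the platform entry, the my_platform name
and the dump_all_trl() helper are invented for illustration). Each platform entry
advertises which Turbo Ratio Limit MSRs it implements, and the dump path tests
feature bits instead of CPU model numbers:

#include <stdio.h>

#define BIT(n) (1U << (n))

/* Illustrative subset of the TRL feature flags added by this patch. */
enum trl_msrs {
	TRL_BASE   = BIT(0),
	TRL_LIMIT1 = BIT(1),
	TRL_LIMIT2 = BIT(2),
	TRL_ATOM   = BIT(3),
	TRL_KNL    = BIT(4),
};

struct platform_features {
	int trl_msrs;	/* which TRL MSRs this platform implements */
};

/* Hypothetical HSX-like platform: base limit plus LIMIT1 and LIMIT2. */
static const struct platform_features my_platform = {
	.trl_msrs = TRL_BASE | TRL_LIMIT1 | TRL_LIMIT2,
};

static void dump_all_trl(const struct platform_features *p)
{
	/* One feature test per MSR replaces a per-CPU-model switch. */
	if (p->trl_msrs & TRL_LIMIT2)
		printf("would dump MSR_TURBO_RATIO_LIMIT2\n");
	if (p->trl_msrs & TRL_LIMIT1)
		printf("would dump MSR_TURBO_RATIO_LIMIT1\n");
	if (p->trl_msrs & TRL_BASE)
		printf("would dump MSR_TURBO_RATIO_LIMIT\n");
	if (p->trl_msrs & TRL_ATOM)
		printf("would dump the Atom core-ratio MSRs\n");
	if (p->trl_msrs & TRL_KNL)
		printf("would dump the KNL turbo-ratio MSR\n");
}

int main(void)
{
	dump_all_trl(&my_platform);
	return 0;
}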
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 171 +++++++------------------- 1 file changed, 46 insertions(+), 125 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 654ae1ce130c..b6c53ec740d3 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -288,6 +288,7 @@ struct platform_features { bool has_nhm_msrs; /* MSR_PLATFORM_INFO, MSR_IA32_TEMPERATURE_TARGET, MSR_SMI_COUNT, MSR_PKG_CST_CONFIG_CONTROL, TRL MSRs */ int bclk_freq; /* CPU base clock */ int cst_limit; /* MSR_PKG_CST_CONFIG_CONTROL */ + int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ }; struct platform_data { @@ -340,11 +341,22 @@ enum package_cstate_limit { CST_LIMIT_GMT, }; +/* For Turbo Ratio Limit MSRs */ +enum turbo_ratio_limit_msrs { + TRL_BASE = BIT(0), + TRL_LIMIT1 = BIT(1), + TRL_LIMIT2 = BIT(2), + TRL_ATOM = BIT(3), + TRL_KNL = BIT(4), + TRL_CORECOUNT = BIT(5), +}; + static const struct platform_features nhm_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_133MHZ, .cst_limit = CST_LIMIT_NHM, + .trl_msrs = TRL_BASE, }; static const struct platform_features nhx_features = { @@ -360,6 +372,7 @@ static const struct platform_features snb_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SNB, + .trl_msrs = TRL_BASE, }; static const struct platform_features snx_features = { @@ -368,6 +381,7 @@ static const struct platform_features snx_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SNB, + .trl_msrs = TRL_BASE, }; static const struct platform_features ivb_features = { @@ -376,6 +390,7 @@ static const struct platform_features ivb_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SNB, + .trl_msrs = TRL_BASE, }; static const struct platform_features ivx_features = { @@ -384,6 +399,7 @@ static const struct platform_features ivx_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SNB, + .trl_msrs = TRL_BASE | TRL_LIMIT1, }; static const struct platform_features hsw_features = { @@ -392,6 +408,7 @@ static const struct platform_features hsw_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, + .trl_msrs = TRL_BASE, }; static const struct platform_features hsx_features = { @@ -400,6 +417,7 @@ static const struct platform_features hsx_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, + .trl_msrs = TRL_BASE | TRL_LIMIT1 | TRL_LIMIT2, }; static const struct platform_features hswl_features = { @@ -408,6 +426,7 @@ static const struct platform_features hswl_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, + .trl_msrs = TRL_BASE, }; static const struct platform_features hswg_features = { @@ -416,6 +435,7 @@ static const struct platform_features hswg_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, + .trl_msrs = TRL_BASE, }; static const struct platform_features bdw_features = { @@ -424,6 +444,7 @@ static const struct platform_features bdw_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, + .trl_msrs = TRL_BASE, }; static const struct platform_features bdwg_features = { @@ -432,6 +453,7 @@ static const struct platform_features bdwg_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, + .trl_msrs = 
TRL_BASE, }; static const struct platform_features bdx_features = { @@ -440,6 +462,7 @@ static const struct platform_features bdx_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, + .trl_msrs = TRL_BASE, }; static const struct platform_features skl_features = { @@ -448,6 +471,7 @@ static const struct platform_features skl_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, + .trl_msrs = TRL_BASE, }; static const struct platform_features cnl_features = { @@ -456,6 +480,7 @@ static const struct platform_features cnl_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, + .trl_msrs = TRL_BASE, }; static const struct platform_features skx_features = { @@ -464,6 +489,7 @@ static const struct platform_features skx_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SKX, + .trl_msrs = TRL_BASE | TRL_CORECOUNT, }; static const struct platform_features icx_features = { @@ -472,6 +498,7 @@ static const struct platform_features icx_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_ICX, + .trl_msrs = TRL_BASE | TRL_CORECOUNT, }; static const struct platform_features spr_features = { @@ -480,12 +507,14 @@ static const struct platform_features spr_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SKX, + .trl_msrs = TRL_BASE | TRL_CORECOUNT, }; static const struct platform_features slv_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_SLV, .cst_limit = CST_LIMIT_SLV, + .trl_msrs = TRL_ATOM, }; static const struct platform_features slvd_features = { @@ -493,12 +522,14 @@ static const struct platform_features slvd_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_SLV, .cst_limit = CST_LIMIT_SLV, + .trl_msrs = TRL_BASE, }; static const struct platform_features amt_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_133MHZ, .cst_limit = CST_LIMIT_AMT, + .trl_msrs = TRL_BASE, }; static const struct platform_features gmt_features = { @@ -506,6 +537,7 @@ static const struct platform_features gmt_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_GMT, + .trl_msrs = TRL_BASE | TRL_CORECOUNT, }; static const struct platform_features gmtd_features = { @@ -513,6 +545,7 @@ static const struct platform_features gmtd_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_GMT, + .trl_msrs = TRL_BASE | TRL_CORECOUNT, }; static const struct platform_features gmtp_features = { @@ -520,6 +553,7 @@ static const struct platform_features gmtp_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_GMT, + .trl_msrs = TRL_BASE, }; static const struct platform_features tmt_features = { @@ -527,6 +561,7 @@ static const struct platform_features tmt_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_GMT, + .trl_msrs = TRL_BASE, }; static const struct platform_features tmtd_features = { @@ -534,6 +569,7 @@ static const struct platform_features tmtd_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_GMT, + .trl_msrs = TRL_BASE | TRL_CORECOUNT, }; static const struct platform_features knl_features = { @@ -541,6 +577,7 @@ static const struct platform_features knl_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_KNL, + .trl_msrs = TRL_KNL, }; static const struct platform_features default_features = { @@ -2911,29 +2948,7 @@ static void dump_ivt_turbo_ratio_limits(void) return; } 
-int has_turbo_ratio_group_limits(int family, int model) -{ - - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_ATOM_GOLDMONT: - case INTEL_FAM6_SKYLAKE_X: - case INTEL_FAM6_ICELAKE_X: - case INTEL_FAM6_SAPPHIRERAPIDS_X: - case INTEL_FAM6_ATOM_GOLDMONT_D: - case INTEL_FAM6_ATOM_TREMONT_D: - return 1; - default: - return 0; - } -} - -static void dump_turbo_ratio_limits(int trl_msr_offset, int family, int model) +static void dump_turbo_ratio_limits(int trl_msr_offset) { unsigned long long msr, core_counts; int shift; @@ -2942,7 +2957,7 @@ static void dump_turbo_ratio_limits(int trl_msr_offset, int family, int model) fprintf(outf, "cpu%d: MSR_%sTURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, trl_msr_offset == MSR_SECONDARY_TURBO_RATIO_LIMIT ? "SECONDARY_" : "", msr); - if (has_turbo_ratio_group_limits(family, model)) { + if (platform->trl_msrs & TRL_CORECOUNT) { get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &core_counts); fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, core_counts); } else { @@ -4236,100 +4251,6 @@ int is_jvl(unsigned int family, unsigned int model) return 0; } -int has_turbo_ratio_limit(unsigned int family, unsigned int model) -{ - if (has_slv_msrs(family, model)) - return 0; - - if (family != 6) - return 0; - - switch (model) { - /* Nehalem compatible, but do not include turbo-ratio limit support */ - case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */ - case INTEL_FAM6_XEON_PHI_KNL: /* PHI - Knights Landing (different MSR definition) */ - return 0; - default: - return 1; - } -} - -int has_atom_turbo_ratio_limit(unsigned int family, unsigned int model) -{ - if (has_slv_msrs(family, model)) - return 1; - - return 0; -} - -int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ - case INTEL_FAM6_HASWELL_X: /* HSW Xeon */ - return 1; - default: - return 0; - } -} - -int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_HASWELL_X: /* HSW Xeon */ - return 1; - default: - return 0; - } -} - -int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */ - return 1; - default: - return 0; - } -} - -int has_glm_turbo_ratio_limit(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_ATOM_GOLDMONT: - case INTEL_FAM6_SKYLAKE_X: - case INTEL_FAM6_ICELAKE_X: - case INTEL_FAM6_SAPPHIRERAPIDS_X: - return 1; - default: - return 0; - } -} - int has_config_tdp(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -4404,23 +4325,23 @@ static void dump_turbo_ratio_info(unsigned int family, unsigned int model) if (!has_turbo) return; - if (has_hsw_turbo_ratio_limit(family, model)) + if (platform->trl_msrs & TRL_LIMIT2) dump_hsw_turbo_ratio_limits(); - if (has_ivt_turbo_ratio_limit(family, model)) + if (platform->trl_msrs & TRL_LIMIT1) dump_ivt_turbo_ratio_limits(); - if (has_turbo_ratio_limit(family, model)) { - dump_turbo_ratio_limits(MSR_TURBO_RATIO_LIMIT, family, model); + if (platform->trl_msrs & TRL_BASE) { + dump_turbo_ratio_limits(MSR_TURBO_RATIO_LIMIT); if (is_hybrid) - 
dump_turbo_ratio_limits(MSR_SECONDARY_TURBO_RATIO_LIMIT, family, model); + dump_turbo_ratio_limits(MSR_SECONDARY_TURBO_RATIO_LIMIT); } - if (has_atom_turbo_ratio_limit(family, model)) + if (platform->trl_msrs & TRL_ATOM) dump_atom_turbo_ratio_limits(); - if (has_knl_turbo_ratio_limit(family, model)) + if (platform->trl_msrs & TRL_KNL) dump_knl_turbo_ratio_limits(); if (has_config_tdp(family, model)) From a3943deaf98f713c819dd7e67af734d4ed4da030 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 30 Jul 2023 13:54:25 +0800 Subject: [PATCH 018/299] tools/power/turbostat: Rename some TRL functions Rename dump_hsw_turbo_ratio_limits() and dump_ivt_turbo_ratio_limits() to dump_turbo_ratio_limit2() and dump_turbo_ratio_limit1() because they dump MSR_TURBO_RATIO_LIMIT1/LIMIT2, and the MSRs' behavior is consistent when they are available. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index b6c53ec740d3..abcc055ea0e1 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -2886,7 +2886,7 @@ static void dump_platform_info(void) return; } -static void dump_hsw_turbo_ratio_limits(void) +static void dump_turbo_ratio_limit2(void) { unsigned long long msr; unsigned int ratio; @@ -2905,7 +2905,7 @@ static void dump_hsw_turbo_ratio_limits(void) return; } -static void dump_ivt_turbo_ratio_limits(void) +static void dump_turbo_ratio_limit1(void) { unsigned long long msr; unsigned int ratio; @@ -4326,10 +4326,10 @@ static void dump_turbo_ratio_info(unsigned int family, unsigned int model) return; if (platform->trl_msrs & TRL_LIMIT2) - dump_hsw_turbo_ratio_limits(); + dump_turbo_ratio_limit2(); if (platform->trl_msrs & TRL_LIMIT1) - dump_ivt_turbo_ratio_limits(); + dump_turbo_ratio_limit1(); if (platform->trl_msrs & TRL_BASE) { dump_turbo_ratio_limits(MSR_TURBO_RATIO_LIMIT); From a61c9cb478c0bf31a50d1829834e822e92876c95 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 31 Aug 2023 14:19:46 +0800 Subject: [PATCH 019/299] tools/power/turbostat: Abstract Config TDP MSRs support Abstract the support for MSR_CONFIG_TDP_NOMINAL/LEVEL_1/LEVEL_2/CONTROL and MSR_TURBO_ACTIVATION_RATIO. Delete has_config_tdp() CPU model check. 
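The same pattern, a boolean in the per-model feature table instead of a switch
on CPU model, can be sketched in a few lines of standalone C. The table
contents, model numbers and the probe_platform() helper below are illustrative
assumptions, not the real turbostat tables:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct platform_features {
	bool has_config_tdp;	/* MSR_CONFIG_TDP_* and MSR_TURBO_ACTIVATION_RATIO present */
};

struct platform_data {
	unsigned int model;
	const struct platform_features *features;
};

static const struct platform_features hsw_like = { .has_config_tdp = true };
static const struct platform_features nhm_like = { .has_config_tdp = false };

/* Hypothetical model ids, for illustration only. */
static const struct platform_data pdata[] = {
	{ 0x3C, &hsw_like },
	{ 0x1E, &nhm_like },
};

static const struct platform_features *probe_platform(unsigned int model)
{
	for (size_t i = 0; i < sizeof(pdata) / sizeof(pdata[0]); i++)
		if (pdata[i].model == model)
			return pdata[i].features;
	return &nhm_like;	/* conservative default */
}

int main(void)
{
	const struct platform_features *platform = probe_platform(0x3C);

	if (platform->has_config_tdp)
		printf("would dump MSR_CONFIG_TDP_* and MSR_TURBO_ACTIVATION_RATIO\n");
	else
		printf("config-TDP MSRs not advertised for this platform\n");
	return 0;
}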
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 54 ++++++++++----------------- 1 file changed, 20 insertions(+), 34 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index abcc055ea0e1..bb9d8c2605c8 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -286,6 +286,7 @@ struct platform_features { bool has_msr_misc_feature_control; /* MSR_MISC_FEATURE_CONTROL */ bool has_msr_misc_pwr_mgmt; /* MSR_MISC_PWR_MGMT */ bool has_nhm_msrs; /* MSR_PLATFORM_INFO, MSR_IA32_TEMPERATURE_TARGET, MSR_SMI_COUNT, MSR_PKG_CST_CONFIG_CONTROL, TRL MSRs */ + bool has_config_tdp; /* MSR_CONFIG_TDP_NOMINAL/LEVEL_1/LEVEL_2/CONTROL, MSR_TURBO_ACTIVATION_RATIO */ int bclk_freq; /* CPU base clock */ int cst_limit; /* MSR_PKG_CST_CONFIG_CONTROL */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ @@ -388,6 +389,7 @@ static const struct platform_features ivb_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, + .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, @@ -406,6 +408,7 @@ static const struct platform_features hsw_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, + .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, @@ -415,6 +418,7 @@ static const struct platform_features hsx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, + .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE | TRL_LIMIT1 | TRL_LIMIT2, @@ -424,6 +428,7 @@ static const struct platform_features hswl_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, + .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, @@ -433,6 +438,7 @@ static const struct platform_features hswg_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, + .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, @@ -442,6 +448,7 @@ static const struct platform_features bdw_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, + .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, @@ -451,6 +458,7 @@ static const struct platform_features bdwg_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, + .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, @@ -460,6 +468,7 @@ static const struct platform_features bdx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, + .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, @@ -469,6 +478,7 @@ static const struct platform_features skl_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, + .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, @@ -478,6 +488,7 @@ static const struct platform_features cnl_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, + .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, @@ -487,6 
+498,7 @@ static const struct platform_features skx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, + .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SKX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, @@ -496,6 +508,7 @@ static const struct platform_features icx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, + .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_ICX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, @@ -505,6 +518,7 @@ static const struct platform_features spr_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, + .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SKX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, @@ -575,6 +589,7 @@ static const struct platform_features tmtd_features = { static const struct platform_features knl_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, + .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_KNL, .trl_msrs = TRL_KNL, @@ -4251,35 +4266,6 @@ int is_jvl(unsigned int family, unsigned int model) return 0; } -int has_config_tdp(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_IVYBRIDGE: /* IVB */ - case INTEL_FAM6_HASWELL: /* HSW */ - case INTEL_FAM6_HASWELL_X: /* HSX */ - case INTEL_FAM6_HASWELL_L: /* HSW */ - case INTEL_FAM6_HASWELL_G: /* HSW */ - case INTEL_FAM6_BROADWELL: /* BDW */ - case INTEL_FAM6_BROADWELL_G: /* BDW */ - case INTEL_FAM6_BROADWELL_X: /* BDX */ - case INTEL_FAM6_SKYLAKE_L: /* SKL */ - case INTEL_FAM6_CANNONLAKE_L: /* CNL */ - case INTEL_FAM6_SKYLAKE_X: /* SKX */ - case INTEL_FAM6_ICELAKE_X: /* ICX */ - case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */ - case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */ - return 1; - default: - return 0; - } -} - /* * tcc_offset_bits: * 0: Tcc Offset not supported (Default) @@ -4320,7 +4306,7 @@ static void remove_underbar(char *s) *to = 0; } -static void dump_turbo_ratio_info(unsigned int family, unsigned int model) +static void dump_turbo_ratio_info(void) { if (!has_turbo) return; @@ -4344,17 +4330,17 @@ static void dump_turbo_ratio_info(unsigned int family, unsigned int model) if (platform->trl_msrs & TRL_KNL) dump_knl_turbo_ratio_limits(); - if (has_config_tdp(family, model)) + if (platform->has_config_tdp) dump_config_tdp(); } -static void dump_cstate_pstate_config_info(unsigned int family, unsigned int model) +static void dump_cstate_pstate_config_info(void) { if (!platform->has_nhm_msrs) return; dump_platform_info(); - dump_turbo_ratio_info(family, model); + dump_turbo_ratio_info(); dump_cst_cfg(); } @@ -6000,7 +5986,7 @@ void process_cpuid() check_tcc_offset(model); if (!quiet) - dump_cstate_pstate_config_info(family, model); + dump_cstate_pstate_config_info(); intel_uncore_frequency_probe(); if (!quiet) From d8e1623baa0b49aa90cf5801cfaac9e3d3aa1e19 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 25 Aug 2023 23:04:44 +0800 Subject: [PATCH 020/299] tools/power/turbostat: Abstract TCC Offset bits support Abstract the support for different TCC Offset bits in MSR_IA32_TEMPERATURE_TARGET. Delete check_tcc_offset() CPU model check. 
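The new decoding can be checked with a small standalone example. GENMASK() is
redefined locally because this sketch runs in user space, the MSR value is made
up, and the MSR_PLATFORM_INFO enable-bit check done by the real code is omitted
here:

#include <stdio.h>

/* Local stand-in for the kernel's GENMASK(); user-space sketch only. */
#define GENMASK(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* Hypothetical register value: TjMax = 100 C in bits 23:16,
	 * TCC offset = 5 in bits 29:24 (tcc_offset_bits == 6). */
	unsigned long long msr = (5ULL << 24) | (100ULL << 16);
	int tcc_offset_bits = 6;	/* per-platform, e.g. SKL/CNL in this patch */

	int tcc_default = (msr >> 16) & 0xFF;
	int tcc_offset = (msr >> 24) & GENMASK(tcc_offset_bits - 1, 0);

	/* Prints: ... 0x05640000 (95 C) (100 default - 5 offset) */
	printf("MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n",
	       msr, tcc_default - tcc_offset, tcc_default, tcc_offset);
	return 0;
}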
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 53 ++++++--------------------- 1 file changed, 12 insertions(+), 41 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index bb9d8c2605c8..c39beb4078da 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -256,7 +256,6 @@ unsigned int gfx_cur_mhz; unsigned int gfx_act_mhz; unsigned int tj_max; unsigned int tj_max_override; -int tcc_offset_bits; double rapl_power_units, rapl_time_units; double rapl_dram_energy_units, rapl_energy_units; double rapl_joule_counter_range; @@ -290,6 +289,7 @@ struct platform_features { int bclk_freq; /* CPU base clock */ int cst_limit; /* MSR_PKG_CST_CONFIG_CONTROL */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ + int tcc_offset_bits; /* TCC Offset bits in MSR_IA32_TEMPERATURE_TARGET */ }; struct platform_data { @@ -482,6 +482,7 @@ static const struct platform_features skl_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, + .tcc_offset_bits = 6, }; static const struct platform_features cnl_features = { @@ -492,6 +493,7 @@ static const struct platform_features cnl_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, + .tcc_offset_bits = 6, }; static const struct platform_features skx_features = { @@ -4266,33 +4268,6 @@ int is_jvl(unsigned int family, unsigned int model) return 0; } -/* - * tcc_offset_bits: - * 0: Tcc Offset not supported (Default) - * 6: Bit 29:24 of MSR_PLATFORM_INFO - * 4: Bit 27:24 of MSR_PLATFORM_INFO - */ -void check_tcc_offset(int model) -{ - unsigned long long msr; - - if (!genuine_intel) - return; - - switch (model) { - case INTEL_FAM6_SKYLAKE_L: - case INTEL_FAM6_CANNONLAKE_L: - if (!get_msr(base_cpu, MSR_PLATFORM_INFO, &msr)) { - msr = (msr >> 30) & 1; - if (msr) - tcc_offset_bits = 6; - } - return; - default: - return; - } -} - static void remove_underbar(char *s) { char *to = s; @@ -5490,20 +5465,18 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk tcc_default = (msr >> 16) & 0xFF; if (!quiet) { - switch (tcc_offset_bits) { - case 4: - tcc_offset = (msr >> 24) & 0xF; + int bits = platform->tcc_offset_bits; + unsigned long long enabled = 0; + + if (bits && !get_msr(base_cpu, MSR_PLATFORM_INFO, &enabled)) + enabled = (enabled >> 30) & 1; + + if (bits && enabled) { + tcc_offset = (msr >> 24) & GENMASK(bits - 1, 0); fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n", cpu, msr, tcc_default - tcc_offset, tcc_default, tcc_offset); - break; - case 6: - tcc_offset = (msr >> 24) & 0x3F; - fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n", - cpu, msr, tcc_default - tcc_offset, tcc_default, tcc_offset); - break; - default: + } else { fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n", cpu, msr, tcc_default); - break; } } @@ -5983,8 +5956,6 @@ void process_cpuid() automatic_cstate_conversion_probe(family, model); prewake_cstate_probe(family, model); - check_tcc_offset(model); - if (!quiet) dump_cstate_pstate_config_info(); intel_uncore_frequency_probe(); From 0c057cf7a0e163bf9631b83c002b3a55691674c7 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 30 Jul 2023 14:25:17 +0800 Subject: [PATCH 021/299] tools/power/turbostat: Abstract Perf Limit Reasons MSRs support Abstract the support for 
MSR_CORE/GFX/RING_PERF_LIMIT_REASONS MSRs. Delete perf_limit_reasons_probe() CPU model check. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 44 +++++++++------------------ 1 file changed, 15 insertions(+), 29 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index c39beb4078da..1207845340ad 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -259,11 +259,8 @@ unsigned int tj_max_override; double rapl_power_units, rapl_time_units; double rapl_dram_energy_units, rapl_energy_units; double rapl_joule_counter_range; -unsigned int do_core_perf_limit_reasons; unsigned int has_automatic_cstate_conversion; unsigned int dis_cstate_prewake; -unsigned int do_gfx_perf_limit_reasons; -unsigned int do_ring_perf_limit_reasons; unsigned int crystal_hz; unsigned long long tsc_hz; int base_cpu; @@ -289,6 +286,7 @@ struct platform_features { int bclk_freq; /* CPU base clock */ int cst_limit; /* MSR_PKG_CST_CONFIG_CONTROL */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ + int plr_msrs; /* MSR_CORE/GFX/RING_PERF_LIMIT_REASONS */ int tcc_offset_bits; /* TCC Offset bits in MSR_IA32_TEMPERATURE_TARGET */ }; @@ -352,6 +350,13 @@ enum turbo_ratio_limit_msrs { TRL_CORECOUNT = BIT(5), }; +/* For Perf Limit Reason MSRs */ +enum perf_limit_reason_msrs { + PLR_CORE = BIT(0), + PLR_GFX = BIT(1), + PLR_RING = BIT(2), +}; + static const struct platform_features nhm_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, @@ -412,6 +417,7 @@ static const struct platform_features hsw_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, + .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, }; static const struct platform_features hsx_features = { @@ -422,6 +428,7 @@ static const struct platform_features hsx_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE | TRL_LIMIT1 | TRL_LIMIT2, + .plr_msrs = PLR_CORE | PLR_RING, }; static const struct platform_features hswl_features = { @@ -432,6 +439,7 @@ static const struct platform_features hswl_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, + .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, }; static const struct platform_features hswg_features = { @@ -442,6 +450,7 @@ static const struct platform_features hswg_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, + .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, }; static const struct platform_features bdw_features = { @@ -4657,7 +4666,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data return -1; } - if (do_core_perf_limit_reasons) { + if (platform->plr_msrs & PLR_CORE) { get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr); fprintf(outf, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); fprintf(outf, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)", @@ -4690,7 +4699,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data (msr & 1 << 17) ? "ThermStatus, " : "", (msr & 1 << 16) ? "PROCHOT, " : ""); } - if (do_gfx_perf_limit_reasons) { + if (platform->plr_msrs & PLR_GFX) { get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr); fprintf(outf, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); fprintf(outf, " (Active: %s%s%s%s%s%s%s%s)", @@ -4710,7 +4719,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data (msr & 1 << 25) ? 
"GFXPwr, " : "", (msr & 1 << 26) ? "PkgPwrL1, " : "", (msr & 1 << 27) ? "PkgPwrL2, " : ""); } - if (do_ring_perf_limit_reasons) { + if (platform->plr_msrs & PLR_RING) { get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr); fprintf(outf, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); fprintf(outf, " (Active: %s%s%s%s%s%s)", @@ -5002,28 +5011,6 @@ void rapl_probe(unsigned int family, unsigned int model) rapl_probe_amd(family, model); } -void perf_limit_reasons_probe(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return; - - if (family != 6) - return; - - switch (model) { - case INTEL_FAM6_HASWELL: /* HSW */ - case INTEL_FAM6_HASWELL_L: /* HSW */ - case INTEL_FAM6_HASWELL_G: /* HSW */ - do_gfx_perf_limit_reasons = 1; - /* FALLTHRU */ - case INTEL_FAM6_HASWELL_X: /* HSX */ - do_core_perf_limit_reasons = 1; - do_ring_perf_limit_reasons = 1; - default: - return; - } -} - void automatic_cstate_conversion_probe(unsigned int family, unsigned int model) { if (family != 6) @@ -5952,7 +5939,6 @@ void process_cpuid() decode_c6_demotion_policy_msr(); rapl_probe(family, model); - perf_limit_reasons_probe(family, model); automatic_cstate_conversion_probe(family, model); prewake_cstate_probe(family, model); From d90120bf9f111bec8ba0b8ef86c46ccbcd9df188 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sat, 22 Apr 2023 11:29:18 +0800 Subject: [PATCH 022/299] tools/power/turbostat: Abstract Automatic Cstate Conversion support Abstract the support for AUTOMATIC_CSTATE_CONVERSION bit in MSR_PKG_CST_CONFIG_CONTROL. Delete automatic_cstate_conversion_probe() CPU model check. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 1207845340ad..a235cbf7b581 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -259,7 +259,6 @@ unsigned int tj_max_override; double rapl_power_units, rapl_time_units; double rapl_dram_energy_units, rapl_energy_units; double rapl_joule_counter_range; -unsigned int has_automatic_cstate_conversion; unsigned int dis_cstate_prewake; unsigned int crystal_hz; unsigned long long tsc_hz; @@ -285,6 +284,7 @@ struct platform_features { bool has_config_tdp; /* MSR_CONFIG_TDP_NOMINAL/LEVEL_1/LEVEL_2/CONTROL, MSR_TURBO_ACTIVATION_RATIO */ int bclk_freq; /* CPU base clock */ int cst_limit; /* MSR_PKG_CST_CONFIG_CONTROL */ + bool has_cst_auto_convension; /* AUTOMATIC_CSTATE_CONVERSION bit in MSR_PKG_CST_CONFIG_CONTROL */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ int plr_msrs; /* MSR_CORE/GFX/RING_PERF_LIMIT_REASONS */ int tcc_offset_bits; /* TCC Offset bits in MSR_IA32_TEMPERATURE_TARGET */ @@ -480,6 +480,7 @@ static const struct platform_features bdx_features = { .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, + .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE, }; @@ -512,6 +513,7 @@ static const struct platform_features skx_features = { .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SKX, + .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, }; @@ -3116,7 +3118,7 @@ static void dump_cst_cfg(void) (msr & (1 << 15)) ? 
"" : "UN", (unsigned int)msr & 0xF, pkg_cstate_limit_strings[pkg_cstate_limit]); #define AUTOMATIC_CSTATE_CONVERSION (1UL << 16) - if (has_automatic_cstate_conversion) { + if (platform->has_cst_auto_convension) { fprintf(outf, ", automatic c-state conversion=%s", (msr & AUTOMATIC_CSTATE_CONVERSION) ? "on" : "off"); } @@ -5011,18 +5013,6 @@ void rapl_probe(unsigned int family, unsigned int model) rapl_probe_amd(family, model); } -void automatic_cstate_conversion_probe(unsigned int family, unsigned int model) -{ - if (family != 6) - return; - - switch (model) { - case INTEL_FAM6_BROADWELL_X: - case INTEL_FAM6_SKYLAKE_X: - has_automatic_cstate_conversion = 1; - } -} - void prewake_cstate_probe(unsigned int family, unsigned int model) { if (is_icx(family, model) || is_spr(family, model)) @@ -5939,7 +5929,6 @@ void process_cpuid() decode_c6_demotion_policy_msr(); rapl_probe(family, model); - automatic_cstate_conversion_probe(family, model); prewake_cstate_probe(family, model); if (!quiet) From a5d1ab93a0993616efbf61378e491d6673f4684d Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sat, 22 Apr 2023 11:39:49 +0800 Subject: [PATCH 023/299] tools/power/turbostat: Abstract hardcoded Crystal Clock frequency Abstract the support for hardcoded Crystal Clock frequency, which is used when crystal clock is not available from CPUID.15. Delete CPU model checks in process_cpuid(). Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index a235cbf7b581..c76baa10f4eb 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -283,6 +283,7 @@ struct platform_features { bool has_nhm_msrs; /* MSR_PLATFORM_INFO, MSR_IA32_TEMPERATURE_TARGET, MSR_SMI_COUNT, MSR_PKG_CST_CONFIG_CONTROL, TRL MSRs */ bool has_config_tdp; /* MSR_CONFIG_TDP_NOMINAL/LEVEL_1/LEVEL_2/CONTROL, MSR_TURBO_ACTIVATION_RATIO */ int bclk_freq; /* CPU base clock */ + int crystal_freq; /* Crystal clock to use when not available from CPUID.15 */ int cst_limit; /* MSR_PKG_CST_CONFIG_CONTROL */ bool has_cst_auto_convension; /* AUTOMATIC_CSTATE_CONVERSION bit in MSR_PKG_CST_CONFIG_CONTROL */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ @@ -490,6 +491,7 @@ static const struct platform_features skl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, + .crystal_freq = 24000000, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, @@ -563,6 +565,7 @@ static const struct platform_features gmt_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, + .crystal_freq = 19200000, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, }; @@ -571,6 +574,7 @@ static const struct platform_features gmtd_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, + .crystal_freq = 25000000, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, }; @@ -579,6 +583,7 @@ static const struct platform_features gmtp_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, + .crystal_freq = 19200000, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE, }; @@ -5796,26 +5801,12 @@ void process_cpuid() __cpuid(0x15, eax_crystal, ebx_tsc, crystal_hz, edx); if (ebx_tsc != 0) { - if (!quiet && (ebx != 0)) fprintf(outf, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d 
ecx_crystal_hz: %d\n", eax_crystal, ebx_tsc, crystal_hz); if (crystal_hz == 0) - switch (model) { - case INTEL_FAM6_SKYLAKE_L: /* SKL */ - crystal_hz = 24000000; /* 24.0 MHz */ - break; - case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ - crystal_hz = 25000000; /* 25.0 MHz */ - break; - case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ - case INTEL_FAM6_ATOM_GOLDMONT_PLUS: - crystal_hz = 19200000; /* 19.2 MHz */ - break; - default: - crystal_hz = 0; - } + crystal_hz = platform->crystal_freq; if (crystal_hz) { tsc_hz = (unsigned long long)crystal_hz *ebx_tsc / eax_crystal; From b9cd66833d3a651cea10666674e9abcf2182e8ad Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sat, 26 Aug 2023 14:38:38 +0800 Subject: [PATCH 024/299] tools/power/turbostat: Redefine RAPL macros Redefine RAPL macros to make the code more readable. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 102 ++++++++++++-------------- 1 file changed, 45 insertions(+), 57 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index c76baa10f4eb..4829e8289feb 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -358,6 +358,40 @@ enum perf_limit_reason_msrs { PLR_RING = BIT(2), }; +/* For RAPL MSRs */ +enum rapl_msrs { + RAPL_PKG_POWER_LIMIT = BIT(0), /* 0x610 MSR_PKG_POWER_LIMIT */ + RAPL_PKG_ENERGY_STATUS = BIT(1), /* 0x611 MSR_PKG_ENERGY_STATUS */ + RAPL_PKG_PERF_STATUS = BIT(2), /* 0x613 MSR_PKG_PERF_STATUS */ + RAPL_PKG_POWER_INFO = BIT(3), /* 0x614 MSR_PKG_POWER_INFO */ + RAPL_DRAM_POWER_LIMIT = BIT(4), /* 0x618 MSR_DRAM_POWER_LIMIT */ + RAPL_DRAM_ENERGY_STATUS = BIT(5), /* 0x619 MSR_DRAM_ENERGY_STATUS */ + RAPL_DRAM_PERF_STATUS = BIT(6), /* 0x61b MSR_DRAM_PERF_STATUS */ + RAPL_DRAM_POWER_INFO = BIT(7), /* 0x61c MSR_DRAM_POWER_INFO */ + RAPL_CORE_POWER_LIMIT = BIT(8), /* 0x638 MSR_PP0_POWER_LIMIT */ + RAPL_CORE_ENERGY_STATUS = BIT(9), /* 0x639 MSR_PP0_ENERGY_STATUS */ + RAPL_CORE_POLICY = BIT(10), /* 0x63a MSR_PP0_POLICY */ + RAPL_GFX_POWER_LIMIT = BIT(11), /* 0x640 MSR_PP1_POWER_LIMIT */ + RAPL_GFX_ENERGY_STATUS = BIT(12), /* 0x641 MSR_PP1_ENERGY_STATUS */ + RAPL_GFX_POLICY = BIT(13), /* 0x642 MSR_PP1_POLICY */ + RAPL_AMD_PWR_UNIT = BIT(14), /* 0xc0010299 MSR_AMD_RAPL_POWER_UNIT */ + RAPL_AMD_CORE_ENERGY_STAT = BIT(15), /* 0xc001029a MSR_AMD_CORE_ENERGY_STATUS */ + RAPL_AMD_PKG_ENERGY_STAT = BIT(16), /* 0xc001029b MSR_AMD_PKG_ENERGY_STATUS */ + RAPL_PER_CORE_ENERGY = BIT(17), /* Indicates cores energy collection is per-core, not per-package. 
*/ +}; + +#define RAPL_PKG (RAPL_PKG_ENERGY_STATUS | RAPL_PKG_POWER_LIMIT) +#define RAPL_DRAM (RAPL_DRAM_ENERGY_STATUS | RAPL_DRAM_POWER_LIMIT) +#define RAPL_CORE (RAPL_CORE_ENERGY_STATUS | RAPL_CORE_POWER_LIMIT) +#define RAPL_GFX (RAPL_GFX_POWER_LIMIT | RAPL_GFX_ENERGY_STATUS) + +#define RAPL_PKG_ALL (RAPL_PKG | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO) +#define RAPL_DRAM_ALL (RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_DRAM_POWER_INFO) +#define RAPL_CORE_ALL (RAPL_CORE | RAPL_CORE_POLICY) +#define RAPL_GFX_ALL (RAPL_GFX | RAPL_GFX_POLIGY) + +#define RAPL_AMD_F17H (RAPL_AMD_PWR_UNIT | RAPL_AMD_CORE_ENERGY_STAT | RAPL_AMD_PKG_ENERGY_STAT) + static const struct platform_features nhm_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, @@ -712,42 +746,6 @@ void probe_platform_features(unsigned int family, unsigned int model) /* Model specific support End */ -#define RAPL_PKG (1 << 0) - /* 0x610 MSR_PKG_POWER_LIMIT */ - /* 0x611 MSR_PKG_ENERGY_STATUS */ -#define RAPL_PKG_PERF_STATUS (1 << 1) - /* 0x613 MSR_PKG_PERF_STATUS */ -#define RAPL_PKG_POWER_INFO (1 << 2) - /* 0x614 MSR_PKG_POWER_INFO */ - -#define RAPL_DRAM (1 << 3) - /* 0x618 MSR_DRAM_POWER_LIMIT */ - /* 0x619 MSR_DRAM_ENERGY_STATUS */ -#define RAPL_DRAM_PERF_STATUS (1 << 4) - /* 0x61b MSR_DRAM_PERF_STATUS */ -#define RAPL_DRAM_POWER_INFO (1 << 5) - /* 0x61c MSR_DRAM_POWER_INFO */ - -#define RAPL_CORES_POWER_LIMIT (1 << 6) - /* 0x638 MSR_PP0_POWER_LIMIT */ -#define RAPL_CORE_POLICY (1 << 7) - /* 0x63a MSR_PP0_POLICY */ - -#define RAPL_GFX (1 << 8) - /* 0x640 MSR_PP1_POWER_LIMIT */ - /* 0x641 MSR_PP1_ENERGY_STATUS */ - /* 0x642 MSR_PP1_POLICY */ - -#define RAPL_CORES_ENERGY_STATUS (1 << 9) - /* 0x639 MSR_PP0_ENERGY_STATUS */ -#define RAPL_PER_CORE_ENERGY (1 << 10) - /* Indicates cores energy collection is per-core, - * not per-package. */ -#define RAPL_AMD_F17H (1 << 11) - /* 0xc0010299 MSR_RAPL_PWR_UNIT */ - /* 0xc001029a MSR_CORE_ENERGY_STAT */ - /* 0xc001029b MSR_PKG_ENERGY_STAT */ -#define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT) #define TJMAX_DEFAULT 100 /* MSRs that are not yet in the kernel-provided header. 
*/ @@ -948,7 +946,7 @@ int idx_valid(int idx) case IDX_DRAM_ENERGY: return do_rapl & RAPL_DRAM; case IDX_PP0_ENERGY: - return do_rapl & RAPL_CORES_ENERGY_STATUS; + return do_rapl & RAPL_CORE_ENERGY_STATUS; case IDX_PP1_ENERGY: return do_rapl & RAPL_GFX; case IDX_PKG_PERF: @@ -2710,7 +2708,7 @@ retry: return -13; p->energy_pkg = msr; } - if (do_rapl & RAPL_CORES_ENERGY_STATUS) { + if (do_rapl & RAPL_CORE_ENERGY_STATUS) { if (get_msr_sum(cpu, MSR_PP0_ENERGY_STATUS, &msr)) return -14; p->energy_cores = msr; @@ -4810,7 +4808,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model) case INTEL_FAM6_HASWELL_G: /* HSW */ case INTEL_FAM6_BROADWELL: /* BDW */ case INTEL_FAM6_BROADWELL_G: /* BDW */ - do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; + do_rapl = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO; if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); @@ -4830,9 +4828,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model) BIC_PRESENT(BIC_PkgWatt); break; case INTEL_FAM6_ATOM_TREMONT: /* EHL */ - do_rapl = - RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS - | RAPL_GFX | RAPL_PKG_POWER_INFO; + do_rapl = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX; if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); @@ -4846,7 +4842,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model) } break; case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */ - do_rapl = RAPL_PKG | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; + do_rapl = RAPL_PKG_ALL; BIC_PRESENT(BIC_PKG__); if (rapl_joules) BIC_PRESENT(BIC_Pkg_J); @@ -4855,9 +4851,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model) break; case INTEL_FAM6_SKYLAKE_L: /* SKL */ case INTEL_FAM6_CANNONLAKE_L: /* CNL */ - do_rapl = - RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS - | RAPL_GFX | RAPL_PKG_POWER_INFO; + do_rapl = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); if (rapl_joules) { @@ -4878,9 +4872,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model) case INTEL_FAM6_ICELAKE_X: /* ICX */ case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */ case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ - do_rapl = - RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | - RAPL_PKG_POWER_INFO; + do_rapl = RAPL_PKG_ALL | RAPL_DRAM_ALL; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); if (rapl_joules) { @@ -4893,9 +4885,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model) break; case INTEL_FAM6_SANDYBRIDGE_X: case INTEL_FAM6_IVYBRIDGE_X: - do_rapl = - RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | - RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO; + do_rapl = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); if (rapl_joules) { @@ -4910,7 +4900,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model) break; case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */ - do_rapl = RAPL_PKG | RAPL_CORES; + do_rapl = RAPL_PKG | RAPL_CORE; if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); @@ -4920,9 +4910,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model) } break; case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ - do_rapl = - RAPL_PKG | RAPL_DRAM | 
RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | - RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS; + do_rapl = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_CORE_ENERGY_STATUS; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); if (rapl_joules) { @@ -5195,7 +5183,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF); } - if (do_rapl & RAPL_CORES_POWER_LIMIT) { + if (do_rapl & RAPL_CORE_POWER_LIMIT) { if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) return -9; fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", From a98f886035d5f7e0ec66036dd6bf98b40e75b692 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sat, 26 Aug 2023 14:57:12 +0800 Subject: [PATCH 025/299] tools/power/turbostat: Simplify the logic for RAPL enumeration The support for each RAPL domains, as well as the support for the perf status of each RAPL domains, can be detected by checking the availabilities of the corresponding RAPL MSRs. Change the code accordingly and remove the hardcoded logic for each model. Note that this also fixes the INTEL_FAM6_ATOM_TREMONT model, which has RAPL_PKG_PERF_STATUS and MSR_DRAM_PERF_STATUS but doesn't have BIC_PKG__ and BIC_RAM__ set. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 105 ++++++-------------------- 1 file changed, 25 insertions(+), 80 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 4829e8289feb..b2da36437b12 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -4809,62 +4809,20 @@ void rapl_probe_intel(unsigned int family, unsigned int model) case INTEL_FAM6_BROADWELL: /* BDW */ case INTEL_FAM6_BROADWELL_G: /* BDW */ do_rapl = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO; - if (rapl_joules) { - BIC_PRESENT(BIC_Pkg_J); - BIC_PRESENT(BIC_Cor_J); - BIC_PRESENT(BIC_GFX_J); - } else { - BIC_PRESENT(BIC_PkgWatt); - BIC_PRESENT(BIC_CorWatt); - BIC_PRESENT(BIC_GFXWatt); - } break; case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO; - if (rapl_joules) - BIC_PRESENT(BIC_Pkg_J); - else - BIC_PRESENT(BIC_PkgWatt); break; case INTEL_FAM6_ATOM_TREMONT: /* EHL */ do_rapl = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX; - if (rapl_joules) { - BIC_PRESENT(BIC_Pkg_J); - BIC_PRESENT(BIC_Cor_J); - BIC_PRESENT(BIC_RAM_J); - BIC_PRESENT(BIC_GFX_J); - } else { - BIC_PRESENT(BIC_PkgWatt); - BIC_PRESENT(BIC_CorWatt); - BIC_PRESENT(BIC_RAMWatt); - BIC_PRESENT(BIC_GFXWatt); - } break; case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */ do_rapl = RAPL_PKG_ALL; - BIC_PRESENT(BIC_PKG__); - if (rapl_joules) - BIC_PRESENT(BIC_Pkg_J); - else - BIC_PRESENT(BIC_PkgWatt); break; case INTEL_FAM6_SKYLAKE_L: /* SKL */ case INTEL_FAM6_CANNONLAKE_L: /* CNL */ do_rapl = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX; - BIC_PRESENT(BIC_PKG__); - BIC_PRESENT(BIC_RAM__); - if (rapl_joules) { - BIC_PRESENT(BIC_Pkg_J); - BIC_PRESENT(BIC_Cor_J); - BIC_PRESENT(BIC_RAM_J); - BIC_PRESENT(BIC_GFX_J); - } else { - BIC_PRESENT(BIC_PkgWatt); - BIC_PRESENT(BIC_CorWatt); - BIC_PRESENT(BIC_RAMWatt); - BIC_PRESENT(BIC_GFXWatt); - } break; case INTEL_FAM6_HASWELL_X: /* HSX */ case INTEL_FAM6_BROADWELL_X: /* BDX */ @@ -4873,60 +4831,47 @@ void rapl_probe_intel(unsigned int family, unsigned int model) case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */ case 
INTEL_FAM6_XEON_PHI_KNL: /* KNL */ do_rapl = RAPL_PKG_ALL | RAPL_DRAM_ALL; - BIC_PRESENT(BIC_PKG__); - BIC_PRESENT(BIC_RAM__); - if (rapl_joules) { - BIC_PRESENT(BIC_Pkg_J); - BIC_PRESENT(BIC_RAM_J); - } else { - BIC_PRESENT(BIC_PkgWatt); - BIC_PRESENT(BIC_RAMWatt); - } break; case INTEL_FAM6_SANDYBRIDGE_X: case INTEL_FAM6_IVYBRIDGE_X: do_rapl = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL; - BIC_PRESENT(BIC_PKG__); - BIC_PRESENT(BIC_RAM__); - if (rapl_joules) { - BIC_PRESENT(BIC_Pkg_J); - BIC_PRESENT(BIC_Cor_J); - BIC_PRESENT(BIC_RAM_J); - } else { - BIC_PRESENT(BIC_PkgWatt); - BIC_PRESENT(BIC_CorWatt); - BIC_PRESENT(BIC_RAMWatt); - } break; case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */ do_rapl = RAPL_PKG | RAPL_CORE; - if (rapl_joules) { - BIC_PRESENT(BIC_Pkg_J); - BIC_PRESENT(BIC_Cor_J); - } else { - BIC_PRESENT(BIC_PkgWatt); - BIC_PRESENT(BIC_CorWatt); - } break; case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ do_rapl = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_CORE_ENERGY_STATUS; - BIC_PRESENT(BIC_PKG__); - BIC_PRESENT(BIC_RAM__); - if (rapl_joules) { - BIC_PRESENT(BIC_Pkg_J); - BIC_PRESENT(BIC_Cor_J); - BIC_PRESENT(BIC_RAM_J); - } else { - BIC_PRESENT(BIC_PkgWatt); - BIC_PRESENT(BIC_CorWatt); - BIC_PRESENT(BIC_RAMWatt); - } break; default: return; } + if (rapl_joules) { + if (do_rapl & RAPL_PKG_ENERGY_STATUS) + BIC_PRESENT(BIC_Pkg_J); + if (do_rapl & RAPL_CORE_ENERGY_STATUS) + BIC_PRESENT(BIC_Cor_J); + if (do_rapl & RAPL_DRAM_ENERGY_STATUS) + BIC_PRESENT(BIC_RAM_J); + if (do_rapl & RAPL_GFX_ENERGY_STATUS) + BIC_PRESENT(BIC_GFX_J); + } else { + if (do_rapl & RAPL_PKG_ENERGY_STATUS) + BIC_PRESENT(BIC_PkgWatt); + if (do_rapl & RAPL_CORE_ENERGY_STATUS) + BIC_PRESENT(BIC_CorWatt); + if (do_rapl & RAPL_DRAM_ENERGY_STATUS) + BIC_PRESENT(BIC_RAMWatt); + if (do_rapl & RAPL_GFX_ENERGY_STATUS) + BIC_PRESENT(BIC_GFXWatt); + } + + if (do_rapl & RAPL_PKG_PERF_STATUS) + BIC_PRESENT(BIC_PKG__); + if (do_rapl & RAPL_DRAM_PERF_STATUS) + BIC_PRESENT(BIC_RAM__); + /* units on package 0, verify later other packages match */ if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr)) return; From 86ba263d9b72b7766a99059e9b3bd104d089b7fa Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Mon, 28 Aug 2023 15:09:40 +0800 Subject: [PATCH 026/299] tools/power/turbostat: Abstract RAPL MSRs support Abstract the support for RAPL MSRs. Delete CPU model checks in rapl_probe_intel(). 
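How the rapl_msrs bitmask drives column selection (the BIC_PRESENT() calls
consolidated by the previous patch) can be sketched standalone. The platform
entry and the printed column names below are illustrative assumptions, not the
turbostat implementation:

#include <stdio.h>
#include <stdbool.h>

#define BIT(n) (1U << (n))

/* Illustrative subset of the RAPL feature bits used in this series. */
enum rapl_msrs {
	RAPL_PKG_ENERGY_STATUS  = BIT(1),
	RAPL_DRAM_ENERGY_STATUS = BIT(5),
	RAPL_CORE_ENERGY_STATUS = BIT(9),
	RAPL_GFX_ENERGY_STATUS  = BIT(12),
};

struct platform_features {
	int rapl_msrs;
};

/* Hypothetical client platform: package, core and graphics energy counters. */
static const struct platform_features my_platform = {
	.rapl_msrs = RAPL_PKG_ENERGY_STATUS | RAPL_CORE_ENERGY_STATUS | RAPL_GFX_ENERGY_STATUS,
};

static void enable_rapl_columns(const struct platform_features *p, bool rapl_joules)
{
	/* One generic block replaces the per-model BIC_PRESENT() lists. */
	if (p->rapl_msrs & RAPL_PKG_ENERGY_STATUS)
		printf("enable column %s\n", rapl_joules ? "Pkg_J" : "PkgWatt");
	if (p->rapl_msrs & RAPL_CORE_ENERGY_STATUS)
		printf("enable column %s\n", rapl_joules ? "Cor_J" : "CorWatt");
	if (p->rapl_msrs & RAPL_DRAM_ENERGY_STATUS)
		printf("enable column %s\n", rapl_joules ? "RAM_J" : "RAMWatt");
	if (p->rapl_msrs & RAPL_GFX_ENERGY_STATUS)
		printf("enable column %s\n", rapl_joules ? "GFX_J" : "GFXWatt");
}

int main(void)
{
	enable_rapl_columns(&my_platform, false);
	return 0;
}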
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 189 ++++++++++++-------------- 1 file changed, 85 insertions(+), 104 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index b2da36437b12..90e1abe96dd5 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -288,6 +288,7 @@ struct platform_features { bool has_cst_auto_convension; /* AUTOMATIC_CSTATE_CONVERSION bit in MSR_PKG_CST_CONFIG_CONTROL */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ int plr_msrs; /* MSR_CORE/GFX/RING_PERF_LIMIT_REASONS */ + int rapl_msrs; /* RAPL PKG/DRAM/CORE/GFX MSRs, AMD RAPL MSRs */ int tcc_offset_bits; /* TCC Offset bits in MSR_IA32_TEMPERATURE_TARGET */ }; @@ -414,6 +415,7 @@ static const struct platform_features snb_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, + .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, }; static const struct platform_features snx_features = { @@ -423,6 +425,7 @@ static const struct platform_features snx_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, + .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL, }; static const struct platform_features ivb_features = { @@ -433,6 +436,7 @@ static const struct platform_features ivb_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, + .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, }; static const struct platform_features ivx_features = { @@ -442,6 +446,7 @@ static const struct platform_features ivx_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE | TRL_LIMIT1, + .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL, }; static const struct platform_features hsw_features = { @@ -453,6 +458,7 @@ static const struct platform_features hsw_features = { .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, + .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, }; static const struct platform_features hsx_features = { @@ -464,6 +470,7 @@ static const struct platform_features hsx_features = { .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE | TRL_LIMIT1 | TRL_LIMIT2, .plr_msrs = PLR_CORE | PLR_RING, + .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, }; static const struct platform_features hswl_features = { @@ -475,6 +482,7 @@ static const struct platform_features hswl_features = { .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, + .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, }; static const struct platform_features hswg_features = { @@ -486,6 +494,7 @@ static const struct platform_features hswg_features = { .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, + .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, }; static const struct platform_features bdw_features = { @@ -496,6 +505,7 @@ static const struct platform_features bdw_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, + .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, }; static const struct platform_features bdwg_features = { @@ -506,6 +516,7 @@ static const struct platform_features bdwg_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_HSW, .trl_msrs = 
TRL_BASE, + .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, }; static const struct platform_features bdx_features = { @@ -517,6 +528,7 @@ static const struct platform_features bdx_features = { .cst_limit = CST_LIMIT_HSW, .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE, + .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, }; static const struct platform_features skl_features = { @@ -529,6 +541,7 @@ static const struct platform_features skl_features = { .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, + .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, }; static const struct platform_features cnl_features = { @@ -540,6 +553,7 @@ static const struct platform_features cnl_features = { .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, + .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, }; static const struct platform_features skx_features = { @@ -551,6 +565,7 @@ static const struct platform_features skx_features = { .cst_limit = CST_LIMIT_SKX, .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, + .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, }; static const struct platform_features icx_features = { @@ -561,6 +576,7 @@ static const struct platform_features icx_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_ICX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, + .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, }; static const struct platform_features spr_features = { @@ -571,6 +587,7 @@ static const struct platform_features spr_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_SKX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, + .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, }; static const struct platform_features slv_features = { @@ -578,6 +595,7 @@ static const struct platform_features slv_features = { .bclk_freq = BCLK_SLV, .cst_limit = CST_LIMIT_SLV, .trl_msrs = TRL_ATOM, + .rapl_msrs = RAPL_PKG | RAPL_CORE, }; static const struct platform_features slvd_features = { @@ -586,6 +604,7 @@ static const struct platform_features slvd_features = { .bclk_freq = BCLK_SLV, .cst_limit = CST_LIMIT_SLV, .trl_msrs = TRL_BASE, + .rapl_msrs = RAPL_PKG | RAPL_CORE, }; static const struct platform_features amt_features = { @@ -602,6 +621,7 @@ static const struct platform_features gmt_features = { .crystal_freq = 19200000, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, + .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO, }; static const struct platform_features gmtd_features = { @@ -611,6 +631,7 @@ static const struct platform_features gmtd_features = { .crystal_freq = 25000000, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, + .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_CORE_ENERGY_STATUS, }; static const struct platform_features gmtp_features = { @@ -620,6 +641,7 @@ static const struct platform_features gmtp_features = { .crystal_freq = 19200000, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE, + .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO, }; static const struct platform_features tmt_features = { @@ -628,6 +650,7 @@ static const struct platform_features tmt_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE, + .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, }; static const struct platform_features tmtd_features = { @@ -636,6 +659,7 @@ static const struct platform_features tmtd_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = 
CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, + .rapl_msrs = RAPL_PKG_ALL, }; static const struct platform_features knl_features = { @@ -645,6 +669,7 @@ static const struct platform_features knl_features = { .bclk_freq = BCLK_100MHZ, .cst_limit = CST_LIMIT_KNL, .trl_msrs = TRL_KNL, + .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, }; static const struct platform_features default_features = { @@ -653,6 +678,10 @@ static const struct platform_features default_features = { static const struct platform_features amd_features = { }; +static const struct platform_features amd_features_with_rapl = { + .rapl_msrs = RAPL_AMD_F17H, +}; + static const struct platform_data turbostat_pdata[] = { { INTEL_FAM6_NEHALEM, &nhm_features }, { INTEL_FAM6_NEHALEM_G, &nhm_features }, @@ -728,6 +757,17 @@ void probe_platform_features(unsigned int family, unsigned int model) if (authentic_amd || hygon_genuine) { platform = &amd_features; + + if (max_extended_level >= 0x80000007) { + unsigned int eax, ebx, ecx, edx; + + __cpuid(0x80000007, eax, ebx, ecx, edx); + /* RAPL (Fam 17h+) */ + if ((edx & (1 << 14)) && family >= 0x17) { + platform = &amd_features_with_rapl; + do_rapl = RAPL_PER_CORE_ENERGY; + } + } return; } @@ -882,7 +922,7 @@ off_t idx_to_offset(int idx) switch (idx) { case IDX_PKG_ENERGY: - if (do_rapl & RAPL_AMD_F17H) + if (platform->rapl_msrs & RAPL_AMD_F17H) offset = MSR_PKG_ENERGY_STAT; else offset = MSR_PKG_ENERGY_STATUS; @@ -942,17 +982,17 @@ int idx_valid(int idx) { switch (idx) { case IDX_PKG_ENERGY: - return do_rapl & (RAPL_PKG | RAPL_AMD_F17H); + return platform->rapl_msrs & (RAPL_PKG | RAPL_AMD_F17H); case IDX_DRAM_ENERGY: - return do_rapl & RAPL_DRAM; + return platform->rapl_msrs & RAPL_DRAM; case IDX_PP0_ENERGY: - return do_rapl & RAPL_CORE_ENERGY_STATUS; + return platform->rapl_msrs & RAPL_CORE_ENERGY_STATUS; case IDX_PP1_ENERGY: - return do_rapl & RAPL_GFX; + return platform->rapl_msrs & RAPL_GFX; case IDX_PKG_PERF: - return do_rapl & RAPL_PKG_PERF_STATUS; + return platform->rapl_msrs & RAPL_PKG_PERF_STATUS; case IDX_DRAM_PERF: - return do_rapl & RAPL_DRAM_PERF_STATUS; + return platform->rapl_msrs & RAPL_DRAM_PERF_STATUS; default: return 0; } @@ -1330,10 +1370,10 @@ void print_header(char *delim) if (DO_BIC(BIC_CORE_THROT_CNT)) outp += sprintf(outp, "%sCoreThr", (printed++ ? delim : "")); - if (do_rapl && !rapl_joules) { + if (platform->rapl_msrs && !rapl_joules) { if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY)) outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); - } else if (do_rapl && rapl_joules) { + } else if (platform->rapl_msrs && rapl_joules) { if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY)) outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); } @@ -1392,7 +1432,7 @@ void print_header(char *delim) if (DO_BIC(BIC_SYS_LPI)) outp += sprintf(outp, "%sSYS%%LPI", (printed++ ? delim : "")); - if (do_rapl && !rapl_joules) { + if (platform->rapl_msrs && !rapl_joules) { if (DO_BIC(BIC_PkgWatt)) outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : "")); if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY)) @@ -1405,7 +1445,7 @@ void print_header(char *delim) outp += sprintf(outp, "%sPKG_%%", (printed++ ? delim : "")); if (DO_BIC(BIC_RAM__)) outp += sprintf(outp, "%sRAM_%%", (printed++ ? delim : "")); - } else if (do_rapl && rapl_joules) { + } else if (platform->rapl_msrs && rapl_joules) { if (DO_BIC(BIC_Pkg_J)) outp += sprintf(outp, "%sPkg_J", (printed++ ? 
delim : "")); if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY)) @@ -2638,7 +2678,7 @@ retry: if (DO_BIC(BIC_CORE_THROT_CNT)) get_core_throt_cnt(cpu, &c->core_throt_cnt); - if (do_rapl & RAPL_AMD_F17H) { + if (platform->rapl_msrs & RAPL_AMD_F17H) { if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr)) return -14; c->core_energy = msr & 0xFFFFFFFF; @@ -2703,37 +2743,37 @@ retry: if (DO_BIC(BIC_SYS_LPI)) p->sys_lpi = cpuidle_cur_sys_lpi_us; - if (do_rapl & RAPL_PKG) { + if (platform->rapl_msrs & RAPL_PKG) { if (get_msr_sum(cpu, MSR_PKG_ENERGY_STATUS, &msr)) return -13; p->energy_pkg = msr; } - if (do_rapl & RAPL_CORE_ENERGY_STATUS) { + if (platform->rapl_msrs & RAPL_CORE_ENERGY_STATUS) { if (get_msr_sum(cpu, MSR_PP0_ENERGY_STATUS, &msr)) return -14; p->energy_cores = msr; } - if (do_rapl & RAPL_DRAM) { + if (platform->rapl_msrs & RAPL_DRAM) { if (get_msr_sum(cpu, MSR_DRAM_ENERGY_STATUS, &msr)) return -15; p->energy_dram = msr; } - if (do_rapl & RAPL_GFX) { + if (platform->rapl_msrs & RAPL_GFX) { if (get_msr_sum(cpu, MSR_PP1_ENERGY_STATUS, &msr)) return -16; p->energy_gfx = msr; } - if (do_rapl & RAPL_PKG_PERF_STATUS) { + if (platform->rapl_msrs & RAPL_PKG_PERF_STATUS) { if (get_msr_sum(cpu, MSR_PKG_PERF_STATUS, &msr)) return -16; p->rapl_pkg_perf_status = msr; } - if (do_rapl & RAPL_DRAM_PERF_STATUS) { + if (platform->rapl_msrs & RAPL_DRAM_PERF_STATUS) { if (get_msr_sum(cpu, MSR_DRAM_PERF_STATUS, &msr)) return -16; p->rapl_dram_perf_status = msr; } - if (do_rapl & RAPL_AMD_F17H) { + if (platform->rapl_msrs & RAPL_AMD_F17H) { if (get_msr_sum(cpu, MSR_PKG_ENERGY_STAT, &msr)) return -13; p->energy_pkg = msr; @@ -4750,7 +4790,7 @@ double get_tdp_intel(unsigned int model) { unsigned long long msr; - if (do_rapl & RAPL_PKG_POWER_INFO) + if (platform->rapl_msrs & RAPL_PKG_POWER_INFO) if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr)) return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; @@ -4791,85 +4831,35 @@ static double rapl_dram_energy_units_probe(int model, double rapl_energy_units) } } -void rapl_probe_intel(unsigned int family, unsigned int model) +void rapl_probe_intel(unsigned int model) { unsigned long long msr; unsigned int time_unit; double tdp; - if (family != 6) - return; - - switch (model) { - case INTEL_FAM6_SANDYBRIDGE: - case INTEL_FAM6_IVYBRIDGE: - case INTEL_FAM6_HASWELL: /* HSW */ - case INTEL_FAM6_HASWELL_L: /* HSW */ - case INTEL_FAM6_HASWELL_G: /* HSW */ - case INTEL_FAM6_BROADWELL: /* BDW */ - case INTEL_FAM6_BROADWELL_G: /* BDW */ - do_rapl = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO; - break; - case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ - case INTEL_FAM6_ATOM_GOLDMONT_PLUS: - do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO; - break; - case INTEL_FAM6_ATOM_TREMONT: /* EHL */ - do_rapl = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX; - break; - case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */ - do_rapl = RAPL_PKG_ALL; - break; - case INTEL_FAM6_SKYLAKE_L: /* SKL */ - case INTEL_FAM6_CANNONLAKE_L: /* CNL */ - do_rapl = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX; - break; - case INTEL_FAM6_HASWELL_X: /* HSX */ - case INTEL_FAM6_BROADWELL_X: /* BDX */ - case INTEL_FAM6_SKYLAKE_X: /* SKX */ - case INTEL_FAM6_ICELAKE_X: /* ICX */ - case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */ - case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ - do_rapl = RAPL_PKG_ALL | RAPL_DRAM_ALL; - break; - case INTEL_FAM6_SANDYBRIDGE_X: - case INTEL_FAM6_IVYBRIDGE_X: - do_rapl = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL; - break; - case 
INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ - case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */ - do_rapl = RAPL_PKG | RAPL_CORE; - break; - case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ - do_rapl = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_CORE_ENERGY_STATUS; - break; - default: - return; - } - if (rapl_joules) { - if (do_rapl & RAPL_PKG_ENERGY_STATUS) + if (platform->rapl_msrs & RAPL_PKG_ENERGY_STATUS) BIC_PRESENT(BIC_Pkg_J); - if (do_rapl & RAPL_CORE_ENERGY_STATUS) + if (platform->rapl_msrs & RAPL_CORE_ENERGY_STATUS) BIC_PRESENT(BIC_Cor_J); - if (do_rapl & RAPL_DRAM_ENERGY_STATUS) + if (platform->rapl_msrs & RAPL_DRAM_ENERGY_STATUS) BIC_PRESENT(BIC_RAM_J); - if (do_rapl & RAPL_GFX_ENERGY_STATUS) + if (platform->rapl_msrs & RAPL_GFX_ENERGY_STATUS) BIC_PRESENT(BIC_GFX_J); } else { - if (do_rapl & RAPL_PKG_ENERGY_STATUS) + if (platform->rapl_msrs & RAPL_PKG_ENERGY_STATUS) BIC_PRESENT(BIC_PkgWatt); - if (do_rapl & RAPL_CORE_ENERGY_STATUS) + if (platform->rapl_msrs & RAPL_CORE_ENERGY_STATUS) BIC_PRESENT(BIC_CorWatt); - if (do_rapl & RAPL_DRAM_ENERGY_STATUS) + if (platform->rapl_msrs & RAPL_DRAM_ENERGY_STATUS) BIC_PRESENT(BIC_RAMWatt); - if (do_rapl & RAPL_GFX_ENERGY_STATUS) + if (platform->rapl_msrs & RAPL_GFX_ENERGY_STATUS) BIC_PRESENT(BIC_GFXWatt); } - if (do_rapl & RAPL_PKG_PERF_STATUS) + if (platform->rapl_msrs & RAPL_PKG_PERF_STATUS) BIC_PRESENT(BIC_PKG__); - if (do_rapl & RAPL_DRAM_PERF_STATUS) + if (platform->rapl_msrs & RAPL_DRAM_PERF_STATUS) BIC_PRESENT(BIC_RAM__); /* units on package 0, verify later other packages match */ @@ -4900,22 +4890,10 @@ void rapl_probe_intel(unsigned int family, unsigned int model) void rapl_probe_amd(unsigned int family, unsigned int model) { unsigned long long msr; - unsigned int eax, ebx, ecx, edx; - unsigned int has_rapl = 0; double tdp; UNUSED(model); - if (max_extended_level >= 0x80000007) { - __cpuid(0x80000007, eax, ebx, ecx, edx); - /* RAPL (Fam 17h+) */ - has_rapl = edx & (1 << 14); - } - - if (!has_rapl || family < 0x17) - return; - - do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY; if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); @@ -4941,12 +4919,15 @@ void rapl_probe_amd(unsigned int family, unsigned int model) /* * rapl_probe() * - * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units + * sets rapl_power_units, rapl_energy_units, rapl_time_units */ void rapl_probe(unsigned int family, unsigned int model) { + if (!platform->rapl_msrs) + return; + if (genuine_intel) - rapl_probe_intel(family, model); + rapl_probe_intel(model); if (authentic_amd || hygon_genuine) rapl_probe_amd(family, model); } @@ -5040,7 +5021,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) UNUSED(c); UNUSED(p); - if (!do_rapl) + if (!platform->rapl_msrs) return 0; /* RAPL counters are per package, so print only for 1st thread/package */ @@ -5053,7 +5034,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) return -1; } - if (do_rapl & RAPL_AMD_F17H) { + if (platform->rapl_msrs & RAPL_AMD_F17H) { msr_name = "MSR_RAPL_PWR_UNIT"; if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr)) return -1; @@ -5066,7 +5047,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr, rapl_power_units, rapl_energy_units, rapl_time_units); - if (do_rapl & RAPL_PKG_POWER_INFO) { + if (platform->rapl_msrs & RAPL_PKG_POWER_INFO) { if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr)) return -5; @@ -5079,7 +5060,7 @@ int 
print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); } - if (do_rapl & RAPL_PKG) { + if (platform->rapl_msrs & RAPL_PKG) { if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr)) return -9; @@ -5103,7 +5084,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) cpu, ((msr >> 0) & 0x1FFF) * rapl_power_units, (msr >> 31) & 1 ? "" : "UN"); } - if (do_rapl & RAPL_DRAM_POWER_INFO) { + if (platform->rapl_msrs & RAPL_DRAM_POWER_INFO) { if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr)) return -6; @@ -5114,7 +5095,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); } - if (do_rapl & RAPL_DRAM) { + if (platform->rapl_msrs & RAPL_DRAM) { if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr)) return -9; fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n", @@ -5122,20 +5103,20 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) print_power_limit_msr(cpu, msr, "DRAM Limit"); } - if (do_rapl & RAPL_CORE_POLICY) { + if (platform->rapl_msrs & RAPL_CORE_POLICY) { if (get_msr(cpu, MSR_PP0_POLICY, &msr)) return -7; fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF); } - if (do_rapl & RAPL_CORE_POWER_LIMIT) { + if (platform->rapl_msrs & RAPL_CORE_POWER_LIMIT) { if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) return -9; fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 31) & 1 ? "" : "UN"); print_power_limit_msr(cpu, msr, "Cores Limit"); } - if (do_rapl & RAPL_GFX) { + if (platform->rapl_msrs & RAPL_GFX) { if (get_msr(cpu, MSR_PP1_POLICY, &msr)) return -8; From e338831b14d2da921348c8c4055b9f2c94effe73 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 21:37:46 +0800 Subject: [PATCH 027/299] tools/power/turbostat: Abstract Per Core RAPL support Abstract the support for Per Core RAPL. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 90e1abe96dd5..6314c40dc15b 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -245,7 +245,6 @@ double tsc_tweak = 1.0; unsigned int show_pkg_only; unsigned int show_core_only; char *output_buffer, *outp; -unsigned int do_rapl; unsigned int do_dts; unsigned int do_ptm; unsigned int do_ipc; @@ -289,6 +288,7 @@ struct platform_features { int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ int plr_msrs; /* MSR_CORE/GFX/RING_PERF_LIMIT_REASONS */ int rapl_msrs; /* RAPL PKG/DRAM/CORE/GFX MSRs, AMD RAPL MSRs */ + bool has_per_core_rapl; /* Indicates cores energy collection is per-core, not per-package. AMD specific for now */ int tcc_offset_bits; /* TCC Offset bits in MSR_IA32_TEMPERATURE_TARGET */ }; @@ -378,7 +378,6 @@ enum rapl_msrs { RAPL_AMD_PWR_UNIT = BIT(14), /* 0xc0010299 MSR_AMD_RAPL_POWER_UNIT */ RAPL_AMD_CORE_ENERGY_STAT = BIT(15), /* 0xc001029a MSR_AMD_CORE_ENERGY_STATUS */ RAPL_AMD_PKG_ENERGY_STAT = BIT(16), /* 0xc001029b MSR_AMD_PKG_ENERGY_STATUS */ - RAPL_PER_CORE_ENERGY = BIT(17), /* Indicates cores energy collection is per-core, not per-package. 
*/ }; #define RAPL_PKG (RAPL_PKG_ENERGY_STATUS | RAPL_PKG_POWER_LIMIT) @@ -680,6 +679,7 @@ static const struct platform_features amd_features = { static const struct platform_features amd_features_with_rapl = { .rapl_msrs = RAPL_AMD_F17H, + .has_per_core_rapl = 1, }; static const struct platform_data turbostat_pdata[] = { @@ -763,10 +763,8 @@ void probe_platform_features(unsigned int family, unsigned int model) __cpuid(0x80000007, eax, ebx, ecx, edx); /* RAPL (Fam 17h+) */ - if ((edx & (1 << 14)) && family >= 0x17) { + if ((edx & (1 << 14)) && family >= 0x17) platform = &amd_features_with_rapl; - do_rapl = RAPL_PER_CORE_ENERGY; - } } return; } @@ -1371,10 +1369,10 @@ void print_header(char *delim) outp += sprintf(outp, "%sCoreThr", (printed++ ? delim : "")); if (platform->rapl_msrs && !rapl_joules) { - if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY)) + if (DO_BIC(BIC_CorWatt) && platform->has_per_core_rapl) outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); } else if (platform->rapl_msrs && rapl_joules) { - if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY)) + if (DO_BIC(BIC_Cor_J) && platform->has_per_core_rapl) outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); } @@ -1435,7 +1433,7 @@ void print_header(char *delim) if (platform->rapl_msrs && !rapl_joules) { if (DO_BIC(BIC_PkgWatt)) outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : "")); - if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY)) + if (DO_BIC(BIC_CorWatt) && !platform->has_per_core_rapl) outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); if (DO_BIC(BIC_GFXWatt)) outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : "")); @@ -1448,7 +1446,7 @@ void print_header(char *delim) } else if (platform->rapl_msrs && rapl_joules) { if (DO_BIC(BIC_Pkg_J)) outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : "")); - if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY)) + if (DO_BIC(BIC_Cor_J) && !platform->has_per_core_rapl) outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); if (DO_BIC(BIC_GFX_J)) outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : "")); @@ -1750,10 +1748,10 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data fmt8 = "%s%.2f"; - if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY)) + if (DO_BIC(BIC_CorWatt) && platform->has_per_core_rapl) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float); - if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY)) + if (DO_BIC(BIC_Cor_J) && platform->has_per_core_rapl) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units); /* print per-package data only for 1st core in package */ @@ -1818,7 +1816,7 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float); - if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY)) + if (DO_BIC(BIC_CorWatt) && !platform->has_per_core_rapl) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float); if (DO_BIC(BIC_GFXWatt)) @@ -1830,7 +1828,7 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data p->energy_dram * rapl_dram_energy_units / interval_float); if (DO_BIC(BIC_Pkg_J)) outp += sprintf(outp, fmt8, (printed++ ? 
delim : ""), p->energy_pkg * rapl_energy_units); - if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY)) + if (DO_BIC(BIC_Cor_J) && !platform->has_per_core_rapl) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units); if (DO_BIC(BIC_GFX_J)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units); From 6d35b8c4a661c849361239fe316035ea952606a3 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sat, 22 Apr 2023 11:59:04 +0800 Subject: [PATCH 028/299] tools/power/turbostat: Abstract RAPL divisor support INTEL_FAM6_ATOM_SILVERMONT model needs a divisor to convert the raw Energy Units value from MSR_RAPL_POWER_UNIT. Abstract the support for RAPL divisor. Delete CPU model check in rapl_probe_intel(). Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 6314c40dc15b..a31724335671 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -289,6 +289,7 @@ struct platform_features { int plr_msrs; /* MSR_CORE/GFX/RING_PERF_LIMIT_REASONS */ int rapl_msrs; /* RAPL PKG/DRAM/CORE/GFX MSRs, AMD RAPL MSRs */ bool has_per_core_rapl; /* Indicates cores energy collection is per-core, not per-package. AMD specific for now */ + bool has_rapl_divisor; /* Divisor for Energy unit raw value from MSR_RAPL_POWER_UNIT */ int tcc_offset_bits; /* TCC Offset bits in MSR_IA32_TEMPERATURE_TARGET */ }; @@ -595,6 +596,7 @@ static const struct platform_features slv_features = { .cst_limit = CST_LIMIT_SLV, .trl_msrs = TRL_ATOM, .rapl_msrs = RAPL_PKG | RAPL_CORE, + .has_rapl_divisor = 1, }; static const struct platform_features slvd_features = { @@ -4865,7 +4867,7 @@ void rapl_probe_intel(unsigned int model) return; rapl_power_units = 1.0 / (1 << (msr & 0xF)); - if (model == INTEL_FAM6_ATOM_SILVERMONT) + if (platform->has_rapl_divisor) rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000; else rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); From 9e6f35159cdef148c711d2fb7d5fd2b2b6fb772d Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sat, 22 Apr 2023 12:21:58 +0800 Subject: [PATCH 029/299] tools/power/turbostat: Abstract fixed DRAM Energy unit support Abstract the support for fixed Dram domain energy unit. Delete rapl_dram_energy_units_probe() CPU model check. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 31 +++++++++------------------ 1 file changed, 10 insertions(+), 21 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index a31724335671..a26ae5a2e2bd 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -290,6 +290,7 @@ struct platform_features { int rapl_msrs; /* RAPL PKG/DRAM/CORE/GFX MSRs, AMD RAPL MSRs */ bool has_per_core_rapl; /* Indicates cores energy collection is per-core, not per-package. 
AMD specific for now */ bool has_rapl_divisor; /* Divisor for Energy unit raw value from MSR_RAPL_POWER_UNIT */ + bool has_fixed_rapl_unit; /* Fixed Energy Unit used for DRAM RAPL Domain */ int tcc_offset_bits; /* TCC Offset bits in MSR_IA32_TEMPERATURE_TARGET */ }; @@ -471,6 +472,7 @@ static const struct platform_features hsx_features = { .trl_msrs = TRL_BASE | TRL_LIMIT1 | TRL_LIMIT2, .plr_msrs = PLR_CORE | PLR_RING, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, + .has_fixed_rapl_unit = 1, }; static const struct platform_features hswl_features = { @@ -529,6 +531,7 @@ static const struct platform_features bdx_features = { .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, + .has_fixed_rapl_unit = 1, }; static const struct platform_features skl_features = { @@ -566,6 +569,7 @@ static const struct platform_features skx_features = { .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, + .has_fixed_rapl_unit = 1, }; static const struct platform_features icx_features = { @@ -577,6 +581,7 @@ static const struct platform_features icx_features = { .cst_limit = CST_LIMIT_ICX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, + .has_fixed_rapl_unit = 1, }; static const struct platform_features spr_features = { @@ -671,6 +676,7 @@ static const struct platform_features knl_features = { .cst_limit = CST_LIMIT_KNL, .trl_msrs = TRL_KNL, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, + .has_fixed_rapl_unit = 1, }; static const struct platform_features default_features = { @@ -4811,26 +4817,6 @@ double get_tdp_amd(unsigned int family) return 280.0; } -/* - * rapl_dram_energy_units_probe() - * Energy units are either hard-coded, or come from RAPL Energy Unit MSR. - */ -static double rapl_dram_energy_units_probe(int model, double rapl_energy_units) -{ - /* only called for genuine_intel, family 6 */ - - switch (model) { - case INTEL_FAM6_HASWELL_X: /* HSX */ - case INTEL_FAM6_BROADWELL_X: /* BDX */ - case INTEL_FAM6_SKYLAKE_X: /* SKX */ - case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ - case INTEL_FAM6_ICELAKE_X: /* ICX */ - return (rapl_dram_energy_units = 15.3 / 1000000); - default: - return (rapl_energy_units); - } -} - void rapl_probe_intel(unsigned int model) { unsigned long long msr; @@ -4872,7 +4858,10 @@ void rapl_probe_intel(unsigned int model) else rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); - rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units); + if (platform->has_fixed_rapl_unit) + rapl_dram_energy_units = (15.3 / 1000000); + else + rapl_dram_energy_units = rapl_energy_units; time_unit = msr >> 16 & 0xF; if (time_unit == 0) From 7c60409382a4be05d601e0b45db7b0166845b0cf Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 10:21:10 +0800 Subject: [PATCH 030/299] tools/power/turbostat: Abstract hardcoded TDP value Different hardcoded TDP values are used when TDP can not be retrieved from the hardware. Abstract hardcoded TDP value. Delete CPU model checks in get_tdp_intel(). 
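For illustration only (the numbers below are assumed typical values, not taken from this patch): the TDP is only used to estimate how long the 32-bit RAPL energy counter can run before it wraps, which turbostat reports as the "Joule Counter Range":

        /* assumed: energy unit = 1/2^14 J (~61 uJ), fallback TDP = 135 W */
        rapl_joule_counter_range = 0xFFFFFFFF * (1.0 / (1 << 14)) / 135.0;
        /* ~= 262144 J / 135 W ~= 1940 seconds (~32 minutes) before wrap */

So a platform-specific .rapl_quirk_tdp only changes this reported range; it does not affect the energy readings themselves.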
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index a26ae5a2e2bd..45698c3a9e72 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -291,6 +291,7 @@ struct platform_features { bool has_per_core_rapl; /* Indicates cores energy collection is per-core, not per-package. AMD specific for now */ bool has_rapl_divisor; /* Divisor for Energy unit raw value from MSR_RAPL_POWER_UNIT */ bool has_fixed_rapl_unit; /* Fixed Energy Unit used for DRAM RAPL Domain */ + int rapl_quirk_tdp; /* Hardcoded TDP value when cannot be retrieved from hardware */ int tcc_offset_bits; /* TCC Offset bits in MSR_IA32_TEMPERATURE_TARGET */ }; @@ -602,6 +603,7 @@ static const struct platform_features slv_features = { .trl_msrs = TRL_ATOM, .rapl_msrs = RAPL_PKG | RAPL_CORE, .has_rapl_divisor = 1, + .rapl_quirk_tdp = 30, }; static const struct platform_features slvd_features = { @@ -611,6 +613,7 @@ static const struct platform_features slvd_features = { .cst_limit = CST_LIMIT_SLV, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE, + .rapl_quirk_tdp = 30, }; static const struct platform_features amt_features = { @@ -688,6 +691,7 @@ static const struct platform_features amd_features = { static const struct platform_features amd_features_with_rapl = { .rapl_msrs = RAPL_AMD_F17H, .has_per_core_rapl = 1, + .rapl_quirk_tdp = 280, /* This is the max stock TDP of HEDT/Server Fam17h+ chips */ }; static const struct platform_data turbostat_pdata[] = { @@ -4792,29 +4796,31 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ +double get_quirk_tdp(void) +{ + if (platform->rapl_quirk_tdp) + return platform->rapl_quirk_tdp; + + return 135.0; +} + double get_tdp_intel(unsigned int model) { unsigned long long msr; + UNUSED(model); + if (platform->rapl_msrs & RAPL_PKG_POWER_INFO) if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr)) return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; - - switch (model) { - case INTEL_FAM6_ATOM_SILVERMONT: - case INTEL_FAM6_ATOM_SILVERMONT_D: - return 30.0; - default: - return 135.0; - } + return get_quirk_tdp(); } double get_tdp_amd(unsigned int family) { UNUSED(family); - /* This is the max stock TDP of HEDT/Server Fam17h+ chips */ - return 280.0; + return get_quirk_tdp(); } void rapl_probe_intel(unsigned int model) From bf1ad57c3f92c551dbfdcecd49797253f55cb7c1 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 10:28:04 +0800 Subject: [PATCH 031/299] tools/power/turbostat: Remove unused family/model parameters for RAPL functions RAPL probing can be done without family/model checking. Remove these parameters in rapl probe functions. 
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 45698c3a9e72..44d7321b004e 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -4804,26 +4804,22 @@ double get_quirk_tdp(void) return 135.0; } -double get_tdp_intel(unsigned int model) +double get_tdp_intel(void) { unsigned long long msr; - UNUSED(model); - if (platform->rapl_msrs & RAPL_PKG_POWER_INFO) if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr)) return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; return get_quirk_tdp(); } -double get_tdp_amd(unsigned int family) +double get_tdp_amd(void) { - UNUSED(family); - return get_quirk_tdp(); } -void rapl_probe_intel(unsigned int model) +void rapl_probe_intel(void) { unsigned long long msr; unsigned int time_unit; @@ -4875,20 +4871,18 @@ void rapl_probe_intel(unsigned int model) rapl_time_units = 1.0 / (1 << (time_unit)); - tdp = get_tdp_intel(model); + tdp = get_tdp_intel(); rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; if (!quiet) fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); } -void rapl_probe_amd(unsigned int family, unsigned int model) +void rapl_probe_amd(void) { unsigned long long msr; double tdp; - UNUSED(model); - if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); @@ -4904,7 +4898,7 @@ void rapl_probe_amd(unsigned int family, unsigned int model) rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f)); rapl_power_units = ldexp(1.0, -(msr & 0xf)); - tdp = get_tdp_amd(family); + tdp = get_tdp_amd(); rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; if (!quiet) @@ -4916,15 +4910,15 @@ void rapl_probe_amd(unsigned int family, unsigned int model) * * sets rapl_power_units, rapl_energy_units, rapl_time_units */ -void rapl_probe(unsigned int family, unsigned int model) +void rapl_probe(void) { if (!platform->rapl_msrs) return; if (genuine_intel) - rapl_probe_intel(model); + rapl_probe_intel(); if (authentic_amd || hygon_genuine) - rapl_probe_amd(family, model); + rapl_probe_amd(); } void prewake_cstate_probe(unsigned int family, unsigned int model) @@ -5828,7 +5822,7 @@ void process_cpuid() if (!quiet && has_slv_msrs(family, model)) decode_c6_demotion_policy_msr(); - rapl_probe(family, model); + rapl_probe(); prewake_cstate_probe(family, model); if (!quiet) From 485a017c45200ed82518f6cdeea554f77e9a0562 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 10:37:23 +0800 Subject: [PATCH 032/299] tools/power/turbostat: Abstract TSC tweak support On some models, the CPU base frequency is different from the TSC frequency, and the aperf/mperf counters are running at CPU base frequency instead of TSC frequency. Abstract support for TSC tweak. Given that tsc_tweak depends on base_hz, move the code to probe_bclk() after base_hz is available. 
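A minimal sketch of the intent (the values below are assumed, only tsc_tweak = base_hz / tsc_hz comes from this patch, and the Busy% expression is an approximation of how the existing code consumes the ratio):

        /* assumed example: 2.3 GHz base frequency vs. 2.304 GHz measured TSC */
        double base_hz = 2300000000.0, tsc_hz = 2304000000.0;
        double tsc_tweak = base_hz / tsc_hz;

        /*
         * mperf counts at base_hz (and only while in C0), while the TSC counts
         * at tsc_hz, so C0 residency derived from mperf/tsc is rescaled:
         */
        unsigned long long mperf_delta = 1150000000, tsc_delta = 2304000000;
        double busy_pct = 100.0 * mperf_delta / tsc_delta / tsc_tweak; /* ~50% */

Moving the computation into probe_bclk() simply guarantees that base_hz is already known when the ratio is formed.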
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 44d7321b004e..05385fabc83a 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -293,6 +293,7 @@ struct platform_features { bool has_fixed_rapl_unit; /* Fixed Energy Unit used for DRAM RAPL Domain */ int rapl_quirk_tdp; /* Hardcoded TDP value when cannot be retrieved from hardware */ int tcc_offset_bits; /* TCC Offset bits in MSR_IA32_TEMPERATURE_TARGET */ + bool enable_tsc_tweak; /* Use CPU Base freq instead of TSC freq for aperf/mperf counter */ }; struct platform_data { @@ -546,6 +547,7 @@ static const struct platform_features skl_features = { .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, + .enable_tsc_tweak = 1, }; static const struct platform_features cnl_features = { @@ -558,6 +560,7 @@ static const struct platform_features cnl_features = { .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, + .enable_tsc_tweak = 1, }; static const struct platform_features skx_features = { @@ -660,6 +663,7 @@ static const struct platform_features tmt_features = { .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, + .enable_tsc_tweak = 1, }; static const struct platform_features tmtd_features = { @@ -2934,11 +2938,6 @@ void probe_cst_limit(void) pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; } -static void calculate_tsc_tweak() -{ - tsc_tweak = base_hz / tsc_hz; -} - void prewake_cstate_probe(unsigned int family, unsigned int model); static void dump_platform_info(void) @@ -4198,6 +4197,9 @@ void probe_bclk(void) base_hz = base_ratio * bclk * 1000000; has_base_hz = 1; + + if (platform->enable_tsc_tweak) + tsc_tweak = base_hz / tsc_hz; } /* @@ -5836,9 +5838,6 @@ void process_cpuid() if (!quiet) dump_sysfs_pstate_config(); - if (has_skl_msrs(family, model) || is_ehl(family, model)) - calculate_tsc_tweak(); - if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK)) BIC_PRESENT(BIC_GFX_rc6); From 3c6a17b8ae44b0116e303402803c173fe2a3da92 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 22:37:37 +0800 Subject: [PATCH 033/299] tools/power/turbostat: Add skeleton support for cstate enumeration Add skeleton support for cstate enumeration. Note that the previous logic may override the cstate setting for multiple times for different reasons. The conversion to new cstate enumeration must be done step by step following the previous code order strictly. 
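To illustrate how this skeleton is meant to be consumed (the entry below is hypothetical; the real .supported_cstates values are filled in by the follow-up patches in this series):

        /* hypothetical platform: core C1/C6 and package C2/C6 supported */
        static const struct platform_features example_features = {
                .has_nhm_msrs           = 1,
                .supported_cstates      = CC1 | CC6 | PC2 | PC6,
                .cst_limit              = CST_LIMIT_SNB,
        };

With such an entry, probe_cstates() marks the core C-state columns present directly from the bitmap, while each package C-state column is shown only if pkg_cstate_limit, established by probe_cst_limit() from MSR_PKG_CST_CONFIG_CONTROL, also permits that state.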
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 57 ++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 05385fabc83a..6a49eb941fe0 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -283,6 +283,7 @@ struct platform_features { bool has_config_tdp; /* MSR_CONFIG_TDP_NOMINAL/LEVEL_1/LEVEL_2/CONTROL, MSR_TURBO_ACTIVATION_RATIO */ int bclk_freq; /* CPU base clock */ int crystal_freq; /* Crystal clock to use when not available from CPUID.15 */ + int supported_cstates; /* Core cstates and Package cstates supported */ int cst_limit; /* MSR_PKG_CST_CONFIG_CONTROL */ bool has_cst_auto_convension; /* AUTOMATIC_CSTATE_CONVERSION bit in MSR_PKG_CST_CONFIG_CONTROL */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ @@ -396,6 +397,21 @@ enum rapl_msrs { #define RAPL_AMD_F17H (RAPL_AMD_PWR_UNIT | RAPL_AMD_CORE_ENERGY_STAT | RAPL_AMD_PKG_ENERGY_STAT) +/* For Cstates */ +enum cstates { + CC1 = BIT(0), + CC3 = BIT(1), + CC6 = BIT(2), + CC7 = BIT(3), + PC2 = BIT(4), + PC3 = BIT(5), + PC6 = BIT(6), + PC7 = BIT(7), + PC8 = BIT(8), + PC9 = BIT(9), + PC10 = BIT(10), +}; + static const struct platform_features nhm_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, @@ -5560,6 +5576,44 @@ void linux_perf_init(void) BIC_PRESENT(BIC_IPC); } +void probe_cstates(void) +{ + probe_cst_limit(); + + if (platform->supported_cstates & CC1) + BIC_PRESENT(BIC_CPU_c1); + + if (platform->supported_cstates & CC3) + BIC_PRESENT(BIC_CPU_c3); + + if (platform->supported_cstates & CC6) + BIC_PRESENT(BIC_CPU_c6); + + if (platform->supported_cstates & CC7) + BIC_PRESENT(BIC_CPU_c7); + + if (platform->supported_cstates & PC2 && (pkg_cstate_limit >= PCL__2)) + BIC_PRESENT(BIC_Pkgpc2); + + if (platform->supported_cstates & PC3 && (pkg_cstate_limit >= PCL__3)) + BIC_PRESENT(BIC_Pkgpc3); + + if (platform->supported_cstates & PC6 && (pkg_cstate_limit >= PCL__6)) + BIC_PRESENT(BIC_Pkgpc6); + + if (platform->supported_cstates & PC7 && (pkg_cstate_limit >= PCL__7)) + BIC_PRESENT(BIC_Pkgpc7); + + if (platform->supported_cstates & PC8 && (pkg_cstate_limit >= PCL__8)) + BIC_PRESENT(BIC_Pkgpc8); + + if (platform->supported_cstates & PC9 && (pkg_cstate_limit >= PCL__9)) + BIC_PRESENT(BIC_Pkgpc9); + + if (platform->supported_cstates & PC10 && (pkg_cstate_limit >= PCL_10)) + BIC_PRESENT(BIC_Pkgpc10); +} + void process_cpuid() { unsigned int eax, ebx, ecx, edx; @@ -5741,7 +5795,8 @@ void process_cpuid() BIC_PRESENT(BIC_IRQ); BIC_PRESENT(BIC_TSC_MHz); - probe_cst_limit(); + probe_cstates(); + if (platform->has_nhm_msrs) { BIC_PRESENT(BIC_CPU_c1); BIC_PRESENT(BIC_CPU_c3); From ce7ddf8af2f96a8a7af4cc2273843518c5810166 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 8 Sep 2023 23:16:27 +0800 Subject: [PATCH 034/299] tools/power/turbostat: Adjust cstate for models with .has_nhm_msrs set Enable CC1/CC3/CC6 for platforms with .has_nhm_msrs set. 
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 33 +++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 6a49eb941fe0..c7345e0c5185 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -416,6 +416,7 @@ static const struct platform_features nhm_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_133MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_NHM, .trl_msrs = TRL_BASE, }; @@ -424,6 +425,7 @@ static const struct platform_features nhx_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_133MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_NHM, }; @@ -432,6 +434,7 @@ static const struct platform_features snb_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -442,6 +445,7 @@ static const struct platform_features snx_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL, @@ -453,6 +457,7 @@ static const struct platform_features ivb_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -463,6 +468,7 @@ static const struct platform_features ivx_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE | TRL_LIMIT1, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL, @@ -474,6 +480,7 @@ static const struct platform_features hsw_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, @@ -486,6 +493,7 @@ static const struct platform_features hsx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE | TRL_LIMIT1 | TRL_LIMIT2, .plr_msrs = PLR_CORE | PLR_RING, @@ -499,6 +507,7 @@ static const struct platform_features hswl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, @@ -511,6 +520,7 @@ static const struct platform_features hswg_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, @@ -523,6 +533,7 @@ static const struct platform_features bdw_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -534,6 +545,7 @@ static const struct platform_features 
bdwg_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -545,6 +557,7 @@ static const struct platform_features bdx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_HSW, .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE, @@ -559,6 +572,7 @@ static const struct platform_features skl_features = { .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 24000000, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, @@ -572,6 +586,7 @@ static const struct platform_features cnl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, @@ -585,6 +600,7 @@ static const struct platform_features skx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_SKX, .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, @@ -598,6 +614,7 @@ static const struct platform_features icx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_ICX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -610,6 +627,7 @@ static const struct platform_features spr_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_SKX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -618,6 +636,7 @@ static const struct platform_features spr_features = { static const struct platform_features slv_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_SLV, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_SLV, .trl_msrs = TRL_ATOM, .rapl_msrs = RAPL_PKG | RAPL_CORE, @@ -629,6 +648,7 @@ static const struct platform_features slvd_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_SLV, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_SLV, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE, @@ -638,6 +658,7 @@ static const struct platform_features slvd_features = { static const struct platform_features amt_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_133MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_AMT, .trl_msrs = TRL_BASE, }; @@ -647,6 +668,7 @@ static const struct platform_features gmt_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 19200000, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO, @@ -657,6 +679,7 @@ static const struct platform_features gmtd_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 25000000, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_CORE_ENERGY_STATUS, @@ -667,6 +690,7 @@ static const struct platform_features gmtp_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 19200000, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_GMT, 
.trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO, @@ -676,6 +700,7 @@ static const struct platform_features tmt_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, @@ -686,6 +711,7 @@ static const struct platform_features tmtd_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL, @@ -696,6 +722,7 @@ static const struct platform_features knl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC3 | CC6, .cst_limit = CST_LIMIT_KNL, .trl_msrs = TRL_KNL, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -5797,12 +5824,8 @@ void process_cpuid() probe_cstates(); - if (platform->has_nhm_msrs) { - BIC_PRESENT(BIC_CPU_c1); - BIC_PRESENT(BIC_CPU_c3); - BIC_PRESENT(BIC_CPU_c6); + if (platform->has_nhm_msrs) BIC_PRESENT(BIC_SMI); - } probe_bclk(); do_snb_cstates = has_snb_msrs(family, model); From 942c854d8d0f6c6fc0864a9da5f5e374a8e146e5 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 8 Sep 2023 23:16:56 +0800 Subject: [PATCH 035/299] tools/power/turbostat: Adjust cstate for has_snb_msrs() models Enable CC7 and PC2 for has_snb_msrs() models. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 47 ++++++++++++--------------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index c7345e0c5185..174a8d0750da 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -434,7 +434,7 @@ static const struct platform_features snb_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -445,7 +445,7 @@ static const struct platform_features snx_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL, @@ -457,7 +457,7 @@ static const struct platform_features ivb_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -468,7 +468,7 @@ static const struct platform_features ivx_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE | TRL_LIMIT1, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL, @@ -480,7 +480,7 @@ static const struct platform_features hsw_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, 
.cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, @@ -493,7 +493,7 @@ static const struct platform_features hsx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE | TRL_LIMIT1 | TRL_LIMIT2, .plr_msrs = PLR_CORE | PLR_RING, @@ -507,7 +507,7 @@ static const struct platform_features hswl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, @@ -520,7 +520,7 @@ static const struct platform_features hswg_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, @@ -533,7 +533,7 @@ static const struct platform_features bdw_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -545,7 +545,7 @@ static const struct platform_features bdwg_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -557,7 +557,7 @@ static const struct platform_features bdx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_HSW, .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE, @@ -572,7 +572,7 @@ static const struct platform_features skl_features = { .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 24000000, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, @@ -586,7 +586,7 @@ static const struct platform_features cnl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, @@ -600,7 +600,7 @@ static const struct platform_features skx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_SKX, .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, @@ -614,7 +614,7 @@ static const struct platform_features icx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_ICX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -627,7 +627,7 @@ static const struct platform_features spr_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - 
.supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_SKX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -668,7 +668,7 @@ static const struct platform_features gmt_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 19200000, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO, @@ -679,7 +679,7 @@ static const struct platform_features gmtd_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 25000000, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_CORE_ENERGY_STATUS, @@ -690,7 +690,7 @@ static const struct platform_features gmtp_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 19200000, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO, @@ -700,7 +700,7 @@ static const struct platform_features tmt_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, @@ -711,7 +711,7 @@ static const struct platform_features tmtd_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL, @@ -5829,12 +5829,7 @@ void process_cpuid() probe_bclk(); do_snb_cstates = has_snb_msrs(family, model); - if (do_snb_cstates) - BIC_PRESENT(BIC_CPU_c7); - do_irtl_snb = has_snb_msrs(family, model); - if (do_snb_cstates && (pkg_cstate_limit >= PCL__2)) - BIC_PRESENT(BIC_Pkgpc2); if (pkg_cstate_limit >= PCL__3) BIC_PRESENT(BIC_Pkgpc3); if (pkg_cstate_limit >= PCL__6) From 6f1935c036f79b56b6a1dc6e51c8c6fe483983ec Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 8 Sep 2023 23:17:23 +0800 Subject: [PATCH 036/299] tools/power/turbostat: Adjust cstate for models with .cst_limit set Enable PC3/PC6 for platforms with .cst_limit set because package cstates are guarded by pkg_cstate_limit. 
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 58 +++++++++++++-------------- 1 file changed, 27 insertions(+), 31 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 174a8d0750da..2bfbf4ccf5ac 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -416,7 +416,7 @@ static const struct platform_features nhm_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_133MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | PC3 | PC6, .cst_limit = CST_LIMIT_NHM, .trl_msrs = TRL_BASE, }; @@ -425,7 +425,7 @@ static const struct platform_features nhx_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_133MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | PC3 | PC6, .cst_limit = CST_LIMIT_NHM, }; @@ -434,7 +434,7 @@ static const struct platform_features snb_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -445,7 +445,7 @@ static const struct platform_features snx_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL, @@ -457,7 +457,7 @@ static const struct platform_features ivb_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -468,7 +468,7 @@ static const struct platform_features ivx_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE | TRL_LIMIT1, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL, @@ -480,7 +480,7 @@ static const struct platform_features hsw_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, @@ -493,7 +493,7 @@ static const struct platform_features hsx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE | TRL_LIMIT1 | TRL_LIMIT2, .plr_msrs = PLR_CORE | PLR_RING, @@ -507,7 +507,7 @@ static const struct platform_features hswl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, @@ 
-520,7 +520,7 @@ static const struct platform_features hswg_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, @@ -533,7 +533,7 @@ static const struct platform_features bdw_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -545,7 +545,7 @@ static const struct platform_features bdwg_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -557,7 +557,7 @@ static const struct platform_features bdx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_HSW, .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE, @@ -572,7 +572,7 @@ static const struct platform_features skl_features = { .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 24000000, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, @@ -586,7 +586,7 @@ static const struct platform_features cnl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, @@ -600,7 +600,7 @@ static const struct platform_features skx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_SKX, .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, @@ -614,7 +614,7 @@ static const struct platform_features icx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_ICX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -627,7 +627,7 @@ static const struct platform_features spr_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_SKX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -636,7 +636,7 @@ static const struct platform_features spr_features = { static const struct platform_features slv_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_SLV, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | PC3 | PC6, .cst_limit = CST_LIMIT_SLV, .trl_msrs = TRL_ATOM, .rapl_msrs = RAPL_PKG | RAPL_CORE, @@ -648,7 
+648,7 @@ static const struct platform_features slvd_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_SLV, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | PC3 | PC6, .cst_limit = CST_LIMIT_SLV, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE, @@ -658,7 +658,7 @@ static const struct platform_features slvd_features = { static const struct platform_features amt_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_133MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | PC3 | PC6, .cst_limit = CST_LIMIT_AMT, .trl_msrs = TRL_BASE, }; @@ -668,7 +668,7 @@ static const struct platform_features gmt_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 19200000, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO, @@ -679,7 +679,7 @@ static const struct platform_features gmtd_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 25000000, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_CORE_ENERGY_STATUS, @@ -690,7 +690,7 @@ static const struct platform_features gmtp_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 19200000, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO, @@ -700,7 +700,7 @@ static const struct platform_features tmt_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, @@ -711,7 +711,7 @@ static const struct platform_features tmtd_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL, @@ -722,7 +722,7 @@ static const struct platform_features knl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6, + .supported_cstates = CC1 | CC3 | CC6 | PC3 | PC6, .cst_limit = CST_LIMIT_KNL, .trl_msrs = TRL_KNL, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -5830,10 +5830,6 @@ void process_cpuid() do_snb_cstates = has_snb_msrs(family, model); do_irtl_snb = has_snb_msrs(family, model); - if (pkg_cstate_limit >= PCL__3) - BIC_PRESENT(BIC_Pkgpc3); - if (pkg_cstate_limit >= PCL__6) - BIC_PRESENT(BIC_Pkgpc6); if (do_snb_cstates && (pkg_cstate_limit >= PCL__7)) BIC_PRESENT(BIC_Pkgpc7); if (has_slv_msrs(family, model)) { From 192cbf0468ae31062526287e257f5b56214d2da5 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 8 Sep 2023 23:17:58 +0800 Subject: [PATCH 037/299] tools/power/turbostat: Adjust cstate for has_snb_msrs() models Enable PC7 for has_snb_msrs() models. 
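Like the neighboring patches, the change is expressed as bits in platform_features.supported_cstates rather than a per-model runtime check. For readers following the series, here is a minimal sketch of the bitmask idiom; the flag values below are assumptions chosen only for illustration, since the real CC and PC definitions live earlier in turbostat.c, outside this excerpt.

/*
 * Minimal sketch of the supported_cstates bitmask idiom.  Flag values are
 * illustrative assumptions, not copied from turbostat.c.
 */
#include <stdio.h>

#define CC1	(1 << 0)
#define CC6	(1 << 2)
#define PC2	(1 << 4)
#define PC6	(1 << 6)
#define PC7	(1 << 7)

struct features_sketch {
	int supported_cstates;
};

static const struct features_sketch snb_like = {
	.supported_cstates = CC1 | CC6 | PC2 | PC6 | PC7,
};

int main(void)
{
	/* A counter is only probed/printed if its bit is set for this model. */
	if (snb_like.supported_cstates & PC7)
		printf("expect a Pkg%%pc7 column on this model\n");
	return 0;
}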
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 46 ++++++++++++--------------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 2bfbf4ccf5ac..f3d44e81d7d5 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -221,7 +221,6 @@ unsigned int rapl_joules; unsigned int summary_only; unsigned int list_header_only; unsigned int dump_only; -unsigned int do_snb_cstates; unsigned int do_knl_cstates; unsigned int do_slm_cstates; unsigned int use_c1_residency_msr; @@ -434,7 +433,7 @@ static const struct platform_features snb_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -445,7 +444,7 @@ static const struct platform_features snx_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL, @@ -457,7 +456,7 @@ static const struct platform_features ivb_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -468,7 +467,7 @@ static const struct platform_features ivx_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_SNB, .trl_msrs = TRL_BASE | TRL_LIMIT1, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL, @@ -480,7 +479,7 @@ static const struct platform_features hsw_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, @@ -493,7 +492,7 @@ static const struct platform_features hsx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE | TRL_LIMIT1 | TRL_LIMIT2, .plr_msrs = PLR_CORE | PLR_RING, @@ -507,7 +506,7 @@ static const struct platform_features hswl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, @@ -520,7 +519,7 @@ static const struct platform_features hswg_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | 
PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, @@ -533,7 +532,7 @@ static const struct platform_features bdw_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -545,7 +544,7 @@ static const struct platform_features bdwg_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -557,7 +556,7 @@ static const struct platform_features bdx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_HSW, .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE, @@ -572,7 +571,7 @@ static const struct platform_features skl_features = { .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 24000000, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, @@ -586,7 +585,7 @@ static const struct platform_features cnl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, @@ -600,7 +599,7 @@ static const struct platform_features skx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_SKX, .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, @@ -614,7 +613,7 @@ static const struct platform_features icx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_ICX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -627,7 +626,7 @@ static const struct platform_features spr_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_SKX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -668,7 +667,7 @@ static const struct platform_features gmt_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 19200000, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO, 
@@ -679,7 +678,7 @@ static const struct platform_features gmtd_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 25000000, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_CORE_ENERGY_STATUS, @@ -690,7 +689,7 @@ static const struct platform_features gmtp_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 19200000, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO, @@ -700,7 +699,7 @@ static const struct platform_features tmt_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, @@ -711,7 +710,7 @@ static const struct platform_features tmtd_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL, @@ -5827,11 +5826,8 @@ void process_cpuid() if (platform->has_nhm_msrs) BIC_PRESENT(BIC_SMI); probe_bclk(); - do_snb_cstates = has_snb_msrs(family, model); do_irtl_snb = has_snb_msrs(family, model); - if (do_snb_cstates && (pkg_cstate_limit >= PCL__7)) - BIC_PRESENT(BIC_Pkgpc7); if (has_slv_msrs(family, model)) { BIC_NOT_PRESENT(BIC_Pkgpc2); BIC_NOT_PRESENT(BIC_Pkgpc3); From ff206149551f09117f44883650a45ae692745703 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 14:07:08 +0800 Subject: [PATCH 038/299] tools/power/turbostat: Adjust cstate for has_slv_msrs() models Disable PC2/PC3/PC7 and enable PC6 for has_slv_msrs() models. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index f3d44e81d7d5..972f5a9b14e6 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -635,7 +635,7 @@ static const struct platform_features spr_features = { static const struct platform_features slv_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_SLV, - .supported_cstates = CC1 | CC3 | CC6 | PC3 | PC6, + .supported_cstates = CC1 | CC3 | CC6 | PC6, .cst_limit = CST_LIMIT_SLV, .trl_msrs = TRL_ATOM, .rapl_msrs = RAPL_PKG | RAPL_CORE, @@ -5829,10 +5829,6 @@ void process_cpuid() do_irtl_snb = has_snb_msrs(family, model); if (has_slv_msrs(family, model)) { - BIC_NOT_PRESENT(BIC_Pkgpc2); - BIC_NOT_PRESENT(BIC_Pkgpc3); - BIC_PRESENT(BIC_Pkgpc6); - BIC_NOT_PRESENT(BIC_Pkgpc7); BIC_PRESENT(BIC_Mod_c6); use_c1_residency_msr = 1; } From 3d982ac0dafeab860b9d42f5cc41a78753275fdd Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 14:09:20 +0800 Subject: [PATCH 039/299] tools/power/turbostat: Adjust cstate for is_jvl() models Disable CC3/CC7/PC2/PC3/PC6/PC7 for is_jvl() models. Delete is_jvl() CPU model check. 
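Deleting these per-model BIC_NOT_PRESENT() calls relies on the built-in-counter (BIC) bitmask being populated from supported_cstates elsewhere in this series (outside this excerpt). A rough, simplified sketch of that bookkeeping follows; the macro bodies are written from memory as assumptions, not copied from turbostat.c.

/*
 * Rough sketch of the BIC bookkeeping (assumed, simplified): each column
 * has a bit, and a column is shown only if it is both enabled (by the
 * user or by default) and present (supported on this platform).
 */
#include <stdio.h>

#define BIC_Pkgpc6	(1ULL << 0)
#define BIC_Pkgpc7	(1ULL << 1)

unsigned long long bic_enabled = ~0ULL;	/* user asked for everything */
unsigned long long bic_present;		/* what the platform supports */

#define BIC_PRESENT(b)		(bic_present |= (b))
#define BIC_NOT_PRESENT(b)	(bic_present &= ~(b))
#define DO_BIC(b)		(bic_enabled & bic_present & (b))

int main(void)
{
	BIC_PRESENT(BIC_Pkgpc6);	/* e.g. driven by supported_cstates & PC6 */

	if (DO_BIC(BIC_Pkgpc6))
		printf("Pkg%%pc6 column will be printed\n");
	if (!DO_BIC(BIC_Pkgpc7))
		printf("Pkg%%pc7 column stays hidden\n");
	return 0;
}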
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 25 +------------------------ 1 file changed, 1 insertion(+), 24 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 972f5a9b14e6..e95972edde3c 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -710,7 +710,7 @@ static const struct platform_features tmtd_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, + .supported_cstates = CC1 | CC6, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL, @@ -4361,21 +4361,6 @@ int is_ehl(unsigned int family, unsigned int model) return 0; } -int is_jvl(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_ATOM_TREMONT_D: - return 1; - } - return 0; -} - static void remove_underbar(char *s) { char *to = s; @@ -5832,14 +5817,6 @@ void process_cpuid() BIC_PRESENT(BIC_Mod_c6); use_c1_residency_msr = 1; } - if (is_jvl(family, model)) { - BIC_NOT_PRESENT(BIC_CPU_c3); - BIC_NOT_PRESENT(BIC_CPU_c7); - BIC_NOT_PRESENT(BIC_Pkgpc2); - BIC_NOT_PRESENT(BIC_Pkgpc3); - BIC_NOT_PRESENT(BIC_Pkgpc6); - BIC_NOT_PRESENT(BIC_Pkgpc7); - } if (is_dnv(family, model)) { BIC_PRESENT(BIC_CPU_c1); BIC_NOT_PRESENT(BIC_CPU_c3); From 8e20ced057423f0edaf40b650facc221e8030b33 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 14:12:29 +0800 Subject: [PATCH 040/299] tools/power/turbostat: Adjust cstate for is_dnv() models Enable CC1 and disable CC3/CC7/PC3/PC7 for is_dnv() models. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index e95972edde3c..069704ef3c80 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -678,7 +678,7 @@ static const struct platform_features gmtd_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 25000000, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, + .supported_cstates = CC1 | CC6 | PC2 | PC6, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_CORE_ENERGY_STATUS, @@ -5818,11 +5818,6 @@ void process_cpuid() use_c1_residency_msr = 1; } if (is_dnv(family, model)) { - BIC_PRESENT(BIC_CPU_c1); - BIC_NOT_PRESENT(BIC_CPU_c3); - BIC_NOT_PRESENT(BIC_Pkgpc3); - BIC_NOT_PRESENT(BIC_CPU_c7); - BIC_NOT_PRESENT(BIC_Pkgpc7); use_c1_residency_msr = 1; } if (is_skx(family, model) || is_icx(family, model) || is_spr(family, model)) { From 24d16bec379db6eea2d72e18c97c6c80e486a5e1 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 8 Sep 2023 23:18:27 +0800 Subject: [PATCH 041/299] tools/power/turbostat: Adjust cstate for is_skx()/is_icx()/is_spr() models Disable CC3/CC7/PC3/PC7 for is_skx()/is_icx()/is_spr() models. Delete is_skx() CPU model check. 
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 28 +++------------------------ 1 file changed, 3 insertions(+), 25 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 069704ef3c80..262af40fe35d 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -599,7 +599,7 @@ static const struct platform_features skx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, + .supported_cstates = CC1 | CC6 | PC2 | PC6, .cst_limit = CST_LIMIT_SKX, .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, @@ -613,7 +613,7 @@ static const struct platform_features icx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, + .supported_cstates = CC1 | CC6 | PC2 | PC6, .cst_limit = CST_LIMIT_ICX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -626,7 +626,7 @@ static const struct platform_features spr_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, + .supported_cstates = CC1 | CC6 | PC2 | PC6, .cst_limit = CST_LIMIT_SKX, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -4298,22 +4298,6 @@ int is_bdx(unsigned int family, unsigned int model) return 0; } -int is_skx(unsigned int family, unsigned int model) -{ - - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_SKYLAKE_X: - return 1; - } - return 0; -} - int is_icx(unsigned int family, unsigned int model) { @@ -5820,12 +5804,6 @@ void process_cpuid() if (is_dnv(family, model)) { use_c1_residency_msr = 1; } - if (is_skx(family, model) || is_icx(family, model) || is_spr(family, model)) { - BIC_NOT_PRESENT(BIC_CPU_c3); - BIC_NOT_PRESENT(BIC_Pkgpc3); - BIC_NOT_PRESENT(BIC_CPU_c7); - BIC_NOT_PRESENT(BIC_Pkgpc7); - } if (is_bdx(family, model)) { BIC_NOT_PRESENT(BIC_CPU_c7); BIC_NOT_PRESENT(BIC_Pkgpc7); From 1109694817fb4fcdfccb1a86fd58e69fb60f4eab Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 14:16:52 +0800 Subject: [PATCH 042/299] tools/power/turbostat: Adjust cstate for is_bdx() models Disable CC7/PC7 for is_bdx() models. Delete is_bdx() CPU model check. 
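As a quick consistency check, the before/after masks in the bdx_features hunk below differ in exactly CC7 and PC7, matching the description above. A throwaway sketch, reusing illustrative (assumed) flag values:

/*
 * Sanity check: the before/after masks from the bdx_features hunk should
 * differ exactly in CC7 and PC7.  Flag values are illustrative assumptions.
 */
#include <assert.h>
#include <stdio.h>

#define CC1	(1 << 0)
#define CC3	(1 << 1)
#define CC6	(1 << 2)
#define CC7	(1 << 3)
#define PC2	(1 << 4)
#define PC3	(1 << 5)
#define PC6	(1 << 6)
#define PC7	(1 << 7)

int main(void)
{
	int before = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7;
	int after  = CC1 | CC3 | CC6 | PC2 | PC3 | PC6;

	assert((before ^ after) == (CC7 | PC7));
	printf("bdx: only CC7 and PC7 removed\n");
	return 0;
}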
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 262af40fe35d..27ca29f0545a 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -556,7 +556,7 @@ static const struct platform_features bdx_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, + .supported_cstates = CC1 | CC3 | CC6 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_HSW, .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE, @@ -4282,22 +4282,6 @@ int is_dnv(unsigned int family, unsigned int model) return 0; } -int is_bdx(unsigned int family, unsigned int model) -{ - - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_BROADWELL_X: - return 1; - } - return 0; -} - int is_icx(unsigned int family, unsigned int model) { @@ -5804,10 +5788,6 @@ void process_cpuid() if (is_dnv(family, model)) { use_c1_residency_msr = 1; } - if (is_bdx(family, model)) { - BIC_NOT_PRESENT(BIC_CPU_c7); - BIC_NOT_PRESENT(BIC_Pkgpc7); - } if (has_c8910_msrs(family, model)) { if (pkg_cstate_limit >= PCL__8) BIC_PRESENT(BIC_Pkgpc8); From 4d2c95d40a90877ffd8f961055419f1f550a7ed9 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 14:20:02 +0800 Subject: [PATCH 043/299] tools/power/turbostat: Adjust cstate for has_c8910_msrs() models Enable PC8/PC9/PC10 for has_c8910_msrs() models. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 27ca29f0545a..b0bc973c077d 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -506,7 +506,7 @@ static const struct platform_features hswl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, @@ -532,7 +532,7 @@ static const struct platform_features bdw_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -571,7 +571,7 @@ static const struct platform_features skl_features = { .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 24000000, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, @@ -585,7 +585,7 @@ static const struct platform_features cnl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_HSW, 
.trl_msrs = TRL_BASE, .tcc_offset_bits = 6, @@ -667,7 +667,7 @@ static const struct platform_features gmt_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 19200000, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO, @@ -689,7 +689,7 @@ static const struct platform_features gmtp_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, .crystal_freq = 19200000, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO, @@ -699,7 +699,7 @@ static const struct platform_features tmt_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, + .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, @@ -5788,14 +5788,6 @@ void process_cpuid() if (is_dnv(family, model)) { use_c1_residency_msr = 1; } - if (has_c8910_msrs(family, model)) { - if (pkg_cstate_limit >= PCL__8) - BIC_PRESENT(BIC_Pkgpc8); - if (pkg_cstate_limit >= PCL__9) - BIC_PRESENT(BIC_Pkgpc9); - if (pkg_cstate_limit >= PCL_10) - BIC_PRESENT(BIC_Pkgpc10); - } do_irtl_hsw = has_c8910_msrs(family, model); if (has_skl_msrs(family, model)) { BIC_PRESENT(BIC_Totl_c0); From cd7a2b6a61100a0fe8a40916d1afbd72d8833d57 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 14:24:19 +0800 Subject: [PATCH 044/299] tools/power/turbostat: Adjust cstate for is_slm()/is_knl()/is_cnl()/is_ehl() models Disable CC3 for is_slm()/is_knl()/is_cnl()/is_ehl() models. Delete is_cnl()/is_ehl() CPU model checks. 
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 44 +++------------------------ 1 file changed, 5 insertions(+), 39 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index b0bc973c077d..24f470883ca2 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -585,7 +585,7 @@ static const struct platform_features cnl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, + .supported_cstates = CC1 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_HSW, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, @@ -635,7 +635,7 @@ static const struct platform_features spr_features = { static const struct platform_features slv_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_SLV, - .supported_cstates = CC1 | CC3 | CC6 | PC6, + .supported_cstates = CC1 | CC6 | PC6, .cst_limit = CST_LIMIT_SLV, .trl_msrs = TRL_ATOM, .rapl_msrs = RAPL_PKG | RAPL_CORE, @@ -647,7 +647,7 @@ static const struct platform_features slvd_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_SLV, - .supported_cstates = CC1 | CC3 | CC6 | PC3 | PC6, + .supported_cstates = CC1 | CC6 | PC3 | PC6, .cst_limit = CST_LIMIT_SLV, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE, @@ -699,7 +699,7 @@ static const struct platform_features tmt_features = { .has_msr_misc_pwr_mgmt = 1, .has_nhm_msrs = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, + .supported_cstates = CC1 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_GMT, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, @@ -721,7 +721,7 @@ static const struct platform_features knl_features = { .has_nhm_msrs = 1, .has_config_tdp = 1, .bclk_freq = BCLK_100MHZ, - .supported_cstates = CC1 | CC3 | CC6 | PC3 | PC6, + .supported_cstates = CC1 | CC6 | PC3 | PC6, .cst_limit = CST_LIMIT_KNL, .trl_msrs = TRL_KNL, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -4314,21 +4314,6 @@ int is_spr(unsigned int family, unsigned int model) return 0; } -int is_ehl(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_ATOM_TREMONT: - return 1; - } - return 0; -} - static void remove_underbar(char *s) { char *to = s; @@ -5248,22 +5233,6 @@ int is_knl(unsigned int family, unsigned int model) return 0; } -int is_cnl(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_CANNONLAKE_L: /* CNL */ - return 1; - } - - return 0; -} - unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model) { if (is_knl(family, model)) @@ -5798,9 +5767,6 @@ void process_cpuid() do_slm_cstates = is_slm(family, model); do_knl_cstates = is_knl(family, model); - if (do_slm_cstates || do_knl_cstates || is_cnl(family, model) || is_ehl(family, model)) - BIC_NOT_PRESENT(BIC_CPU_c3); - if (!quiet) decode_misc_pwr_mgmt_msr(); From 8c382f9e74663072805565036026d4e79de96425 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 14:27:26 +0800 Subject: [PATCH 045/299] tools/power/turbostat: Use fine grained IRTL output It is pointless to dump the IRTL register for a package 
cstate that is not supported by the platform. Print IRTL only for states that are available in platform->supported_cstates. Delete has_c8910_msrs() CPU model check. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 99 ++++++++++----------------- 1 file changed, 36 insertions(+), 63 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 24f470883ca2..680373010b01 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -229,7 +229,6 @@ unsigned int has_epb; unsigned int has_turbo; unsigned int is_hybrid; unsigned int do_irtl_snb; -unsigned int do_irtl_hsw; unsigned int units = 1000000; /* MHz etc */ unsigned int genuine_intel; unsigned int authentic_amd; @@ -3269,39 +3268,47 @@ void print_irtl(void) { unsigned long long msr; - get_msr(base_cpu, MSR_PKGC3_IRTL, &msr); - fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr); - fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", - (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + if (platform->supported_cstates & PC3) { + get_msr(base_cpu, MSR_PKGC3_IRTL, &msr); + fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr); + fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", + (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + } - get_msr(base_cpu, MSR_PKGC6_IRTL, &msr); - fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr); - fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", - (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + if (platform->supported_cstates & PC6) { + get_msr(base_cpu, MSR_PKGC6_IRTL, &msr); + fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr); + fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", + (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + } - get_msr(base_cpu, MSR_PKGC7_IRTL, &msr); - fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr); - fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", - (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + if (platform->supported_cstates & PC7) { + get_msr(base_cpu, MSR_PKGC7_IRTL, &msr); + fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr); + fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", + (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + } - if (!do_irtl_hsw) - return; + if (platform->supported_cstates & PC8) { + get_msr(base_cpu, MSR_PKGC8_IRTL, &msr); + fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr); + fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", + (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + } - get_msr(base_cpu, MSR_PKGC8_IRTL, &msr); - fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr); - fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", - (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); - - get_msr(base_cpu, MSR_PKGC9_IRTL, &msr); - fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr); - fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", - (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); - - get_msr(base_cpu, MSR_PKGC10_IRTL, &msr); - fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr); - fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? 
"" : "NOT", - (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + if (platform->supported_cstates & PC9) { + get_msr(base_cpu, MSR_PKGC9_IRTL, &msr); + fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr); + fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", + (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + } + if (platform->supported_cstates & PC10) { + get_msr(base_cpu, MSR_PKGC10_IRTL, &msr); + fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr); + fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", + (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + } } void free_fd_percpu(void) @@ -5145,39 +5152,6 @@ int has_snb_msrs(unsigned int family, unsigned int model) return 0; } -/* - * HSW ULT added support for C8/C9/C10 MSRs: - * - * MSR_PKG_C8_RESIDENCY 0x00000630 - * MSR_PKG_C9_RESIDENCY 0x00000631 - * MSR_PKG_C10_RESIDENCY 0x00000632 - * - * MSR_PKGC8_IRTL 0x00000633 - * MSR_PKGC9_IRTL 0x00000634 - * MSR_PKGC10_IRTL 0x00000635 - * - */ -int has_c8910_msrs(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_HASWELL_L: /* HSW */ - case INTEL_FAM6_BROADWELL: /* BDW */ - case INTEL_FAM6_SKYLAKE_L: /* SKL */ - case INTEL_FAM6_CANNONLAKE_L: /* CNL */ - case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ - case INTEL_FAM6_ATOM_GOLDMONT_PLUS: - case INTEL_FAM6_ATOM_TREMONT: /* EHL */ - return 1; - } - return 0; -} - /* * SKL adds support for additional MSRS: * @@ -5757,7 +5731,6 @@ void process_cpuid() if (is_dnv(family, model)) { use_c1_residency_msr = 1; } - do_irtl_hsw = has_c8910_msrs(family, model); if (has_skl_msrs(family, model)) { BIC_PRESENT(BIC_Totl_c0); BIC_PRESENT(BIC_Any_c0); From 148df4fd04a98fb24198ecb4419c87e07d38af30 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 8 Sep 2023 23:18:51 +0800 Subject: [PATCH 046/299] tools/power/turbostat: Abstract IRTL support Abstract the support for MSR_PKGC3/PKGC6/PKGC7/PKGC8/PKGC9/PKGC10_IRTL. Delete has_snb_msrs() CPU model check. 
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 72 ++++++++++----------------- 1 file changed, 26 insertions(+), 46 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 680373010b01..44be06b763b2 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -228,7 +228,6 @@ unsigned int has_aperf; unsigned int has_epb; unsigned int has_turbo; unsigned int is_hybrid; -unsigned int do_irtl_snb; unsigned int units = 1000000; /* MHz etc */ unsigned int genuine_intel; unsigned int authentic_amd; @@ -284,6 +283,7 @@ struct platform_features { int supported_cstates; /* Core cstates and Package cstates supported */ int cst_limit; /* MSR_PKG_CST_CONFIG_CONTROL */ bool has_cst_auto_convension; /* AUTOMATIC_CSTATE_CONVERSION bit in MSR_PKG_CST_CONFIG_CONTROL */ + bool has_irtl_msrs; /* MSR_PKGC3/PKGC6/PKGC7/PKGC8/PKGC9/PKGC10_IRTL */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ int plr_msrs; /* MSR_CORE/GFX/RING_PERF_LIMIT_REASONS */ int rapl_msrs; /* RAPL PKG/DRAM/CORE/GFX MSRs, AMD RAPL MSRs */ @@ -434,6 +434,7 @@ static const struct platform_features snb_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_SNB, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, }; @@ -445,6 +446,7 @@ static const struct platform_features snx_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_SNB, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL, }; @@ -457,6 +459,7 @@ static const struct platform_features ivb_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_SNB, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, }; @@ -468,6 +471,7 @@ static const struct platform_features ivx_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_SNB, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE | TRL_LIMIT1, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM_ALL, }; @@ -480,6 +484,7 @@ static const struct platform_features hsw_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_HSW, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -493,6 +498,7 @@ static const struct platform_features hsx_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_HSW, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE | TRL_LIMIT1 | TRL_LIMIT2, .plr_msrs = PLR_CORE | PLR_RING, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -507,6 +513,7 @@ static const struct platform_features hswl_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_HSW, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -520,6 +527,7 @@ static const struct 
platform_features hswg_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_HSW, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE, .plr_msrs = PLR_CORE | PLR_GFX | PLR_RING, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, @@ -533,6 +541,7 @@ static const struct platform_features bdw_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_HSW, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, }; @@ -545,6 +554,7 @@ static const struct platform_features bdwg_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7, .cst_limit = CST_LIMIT_HSW, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE_ALL | RAPL_GFX | RAPL_PKG_POWER_INFO, }; @@ -557,6 +567,7 @@ static const struct platform_features bdx_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC3 | CC6 | PC2 | PC3 | PC6, .cst_limit = CST_LIMIT_HSW, + .has_irtl_msrs = 1, .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -572,6 +583,7 @@ static const struct platform_features skl_features = { .crystal_freq = 24000000, .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_HSW, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, @@ -586,6 +598,7 @@ static const struct platform_features cnl_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_HSW, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, @@ -600,6 +613,7 @@ static const struct platform_features skx_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC6 | PC2 | PC6, .cst_limit = CST_LIMIT_SKX, + .has_irtl_msrs = 1, .has_cst_auto_convension = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, @@ -614,6 +628,7 @@ static const struct platform_features icx_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC6 | PC2 | PC6, .cst_limit = CST_LIMIT_ICX, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, .has_fixed_rapl_unit = 1, @@ -627,6 +642,7 @@ static const struct platform_features spr_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC6 | PC2 | PC6, .cst_limit = CST_LIMIT_SKX, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, }; @@ -668,6 +684,7 @@ static const struct platform_features gmt_features = { .crystal_freq = 19200000, .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_GMT, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO, }; @@ -679,6 +696,7 @@ static const struct platform_features gmtd_features = { .crystal_freq = 25000000, .supported_cstates = CC1 | CC6 | PC2 | PC6, .cst_limit = CST_LIMIT_GMT, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_CORE_ENERGY_STATUS, }; @@ -690,6 +708,7 @@ static 
const struct platform_features gmtp_features = { .crystal_freq = 19200000, .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_GMT, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_PKG_POWER_INFO, }; @@ -700,6 +719,7 @@ static const struct platform_features tmt_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_GMT, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, .enable_tsc_tweak = 1, @@ -711,6 +731,7 @@ static const struct platform_features tmtd_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC6, .cst_limit = CST_LIMIT_GMT, + .has_irtl_msrs = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL, }; @@ -3268,6 +3289,9 @@ void print_irtl(void) { unsigned long long msr; + if (!platform->has_irtl_msrs) + return; + if (platform->supported_cstates & PC3) { get_msr(base_cpu, MSR_PKGC3_IRTL, &msr); fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr); @@ -5109,49 +5133,6 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) return 0; } -/* - * SNB adds support for additional MSRs: - * - * MSR_PKG_C7_RESIDENCY 0x000003fa - * MSR_CORE_C7_RESIDENCY 0x000003fe - * MSR_PKG_C2_RESIDENCY 0x0000060d - */ - -int has_snb_msrs(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_SANDYBRIDGE: - case INTEL_FAM6_SANDYBRIDGE_X: - case INTEL_FAM6_IVYBRIDGE: /* IVB */ - case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ - case INTEL_FAM6_HASWELL: /* HSW */ - case INTEL_FAM6_HASWELL_X: /* HSW */ - case INTEL_FAM6_HASWELL_L: /* HSW */ - case INTEL_FAM6_HASWELL_G: /* HSW */ - case INTEL_FAM6_BROADWELL: /* BDW */ - case INTEL_FAM6_BROADWELL_G: /* BDW */ - case INTEL_FAM6_BROADWELL_X: /* BDX */ - case INTEL_FAM6_SKYLAKE_L: /* SKL */ - case INTEL_FAM6_CANNONLAKE_L: /* CNL */ - case INTEL_FAM6_SKYLAKE_X: /* SKX */ - case INTEL_FAM6_ICELAKE_X: /* ICX */ - case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */ - case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ - case INTEL_FAM6_ATOM_GOLDMONT_PLUS: - case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ - case INTEL_FAM6_ATOM_TREMONT: /* EHL */ - case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */ - return 1; - } - return 0; -} - /* * SKL adds support for additional MSRS: * @@ -5723,7 +5704,6 @@ void process_cpuid() BIC_PRESENT(BIC_SMI); probe_bclk(); - do_irtl_snb = has_snb_msrs(family, model); if (has_slv_msrs(family, model)) { BIC_PRESENT(BIC_Mod_c6); use_c1_residency_msr = 1; @@ -6098,7 +6078,7 @@ void turbostat_init() if (!quiet) for_all_cpus(print_thermal, ODD_COUNTERS); - if (!quiet && do_irtl_snb) + if (!quiet) print_irtl(); if (DO_BIC(BIC_IPC)) From 76d83d2ae8e3099d9a6bd67fba918108824d7d4d Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 14:41:30 +0800 Subject: [PATCH 047/299] tools/power/turbostat: Abstract MSR_CORE_C1_RES support Abstract the support for MSR_CORE_C1_RES. Delete is_dnv() CPU model check. 
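The get_msr() calls gated by the new flag below ultimately go through the kernel's msr driver. For context, here is a minimal standalone sketch of that read path; the /dev/cpu/N/msr interface is the standard msr-driver one, and the 0x660 offset used for MSR_CORE_C1_RES is an assumption here, not something taken from this patch.

/*
 * Minimal sketch of reading a residency MSR from user space via the msr
 * driver (the mechanism turbostat's get_msr() wraps).  Requires root and
 * the msr module; the 0x660 offset is an assumption.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_CORE_C1_RES	0x660

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	if (pread(fd, &val, sizeof(val), MSR_CORE_C1_RES) != sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("CPU0 C1 residency raw count: %llu\n", (unsigned long long)val);
	close(fd);
	return 0;
}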
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 30 ++++++--------------------- 1 file changed, 6 insertions(+), 24 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 44be06b763b2..de9260c96678 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -223,7 +223,6 @@ unsigned int list_header_only; unsigned int dump_only; unsigned int do_knl_cstates; unsigned int do_slm_cstates; -unsigned int use_c1_residency_msr; unsigned int has_aperf; unsigned int has_epb; unsigned int has_turbo; @@ -284,6 +283,7 @@ struct platform_features { int cst_limit; /* MSR_PKG_CST_CONFIG_CONTROL */ bool has_cst_auto_convension; /* AUTOMATIC_CSTATE_CONVERSION bit in MSR_PKG_CST_CONFIG_CONTROL */ bool has_irtl_msrs; /* MSR_PKGC3/PKGC6/PKGC7/PKGC8/PKGC9/PKGC10_IRTL */ + bool has_msr_core_c1_res; /* MSR_CORE_C1_RES */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ int plr_msrs; /* MSR_CORE/GFX/RING_PERF_LIMIT_REASONS */ int rapl_msrs; /* RAPL PKG/DRAM/CORE/GFX MSRs, AMD RAPL MSRs */ @@ -652,6 +652,7 @@ static const struct platform_features slv_features = { .bclk_freq = BCLK_SLV, .supported_cstates = CC1 | CC6 | PC6, .cst_limit = CST_LIMIT_SLV, + .has_msr_core_c1_res = 1, .trl_msrs = TRL_ATOM, .rapl_msrs = RAPL_PKG | RAPL_CORE, .has_rapl_divisor = 1, @@ -697,6 +698,7 @@ static const struct platform_features gmtd_features = { .supported_cstates = CC1 | CC6 | PC2 | PC6, .cst_limit = CST_LIMIT_GMT, .has_irtl_msrs = 1, + .has_msr_core_c1_res = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL | RAPL_CORE_ENERGY_STATUS, }; @@ -2069,7 +2071,7 @@ void delta_core(struct core_data *new, struct core_data *old) int soft_c1_residency_display(int bic) { - if (!DO_BIC(BIC_CPU_c1) || use_c1_residency_msr) + if (!DO_BIC(BIC_CPU_c1) || platform->has_msr_core_c1_res) return 0; return DO_BIC_READ(bic); @@ -2118,7 +2120,7 @@ int delta_thread(struct thread_data *new, struct thread_data *old, struct core_d } } - if (use_c1_residency_msr) { + if (platform->has_msr_core_c1_res) { /* * Some models have a dedicated C1 residency MSR, * which should be more accurate than the derivation below. @@ -2700,7 +2702,7 @@ retry: return -5; t->smi_count = msr & 0xFFFFFFFF; } - if (DO_BIC(BIC_CPU_c1) && use_c1_residency_msr) { + if (DO_BIC(BIC_CPU_c1) && platform->has_msr_core_c1_res) { if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1)) return -6; } @@ -4297,22 +4299,6 @@ int has_slv_msrs(unsigned int family, unsigned int model) return 0; } -int is_dnv(unsigned int family, unsigned int model) -{ - - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_ATOM_GOLDMONT_D: - return 1; - } - return 0; -} - int is_icx(unsigned int family, unsigned int model) { @@ -5706,10 +5692,6 @@ void process_cpuid() if (has_slv_msrs(family, model)) { BIC_PRESENT(BIC_Mod_c6); - use_c1_residency_msr = 1; - } - if (is_dnv(family, model)) { - use_c1_residency_msr = 1; } if (has_skl_msrs(family, model)) { BIC_PRESENT(BIC_Totl_c0); From 9cc1c1038526a5b6c9a57397def80ba79c260ff2 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 14:47:30 +0800 Subject: [PATCH 048/299] tools/power/turbostat: Abstract MSR_MODULE_C6_RES_MS support Abstract MSR_MODULE_C6_RES_MS support. 
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index de9260c96678..5f90d96bd9e3 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -284,6 +284,7 @@ struct platform_features { bool has_cst_auto_convension; /* AUTOMATIC_CSTATE_CONVERSION bit in MSR_PKG_CST_CONFIG_CONTROL */ bool has_irtl_msrs; /* MSR_PKGC3/PKGC6/PKGC7/PKGC8/PKGC9/PKGC10_IRTL */ bool has_msr_core_c1_res; /* MSR_CORE_C1_RES */ + bool has_msr_module_c6_res_ms; /* MSR_MODULE_C6_RES_MS */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ int plr_msrs; /* MSR_CORE/GFX/RING_PERF_LIMIT_REASONS */ int rapl_msrs; /* RAPL PKG/DRAM/CORE/GFX MSRs, AMD RAPL MSRs */ @@ -653,6 +654,7 @@ static const struct platform_features slv_features = { .supported_cstates = CC1 | CC6 | PC6, .cst_limit = CST_LIMIT_SLV, .has_msr_core_c1_res = 1, + .has_msr_module_c6_res_ms = 1, .trl_msrs = TRL_ATOM, .rapl_msrs = RAPL_PKG | RAPL_CORE, .has_rapl_divisor = 1, @@ -5690,9 +5692,9 @@ void process_cpuid() BIC_PRESENT(BIC_SMI); probe_bclk(); - if (has_slv_msrs(family, model)) { + if (platform->has_msr_module_c6_res_ms) BIC_PRESENT(BIC_Mod_c6); - } + if (has_skl_msrs(family, model)) { BIC_PRESENT(BIC_Totl_c0); BIC_PRESENT(BIC_Any_c0); From 6c36882e09dbc9a44d64180ba7972838d3f45488 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 14:52:24 +0800 Subject: [PATCH 049/299] tools/power/turbostat: Abstract MSR_CC6/MC6_DEMOTION_POLICY_CONFIG support Abstract the support for MSR_CC6/MC6_DEMOTION_POLICY_CONFIG. Delete has_slv_msrs() CPU model check. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 29 ++++++--------------------- 1 file changed, 6 insertions(+), 23 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 5f90d96bd9e3..f8d7ba87f968 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -285,6 +285,7 @@ struct platform_features { bool has_irtl_msrs; /* MSR_PKGC3/PKGC6/PKGC7/PKGC8/PKGC9/PKGC10_IRTL */ bool has_msr_core_c1_res; /* MSR_CORE_C1_RES */ bool has_msr_module_c6_res_ms; /* MSR_MODULE_C6_RES_MS */ + bool has_msr_c6_demotion_policy_config; /* MSR_CC6_DEMOTION_POLICY_CONFIG/MSR_MC6_DEMOTION_POLICY_CONFIG */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ int plr_msrs; /* MSR_CORE/GFX/RING_PERF_LIMIT_REASONS */ int rapl_msrs; /* RAPL PKG/DRAM/CORE/GFX MSRs, AMD RAPL MSRs */ @@ -655,6 +656,7 @@ static const struct platform_features slv_features = { .cst_limit = CST_LIMIT_SLV, .has_msr_core_c1_res = 1, .has_msr_module_c6_res_ms = 1, + .has_msr_c6_demotion_policy_config = 1, .trl_msrs = TRL_ATOM, .rapl_msrs = RAPL_PKG | RAPL_CORE, .has_rapl_divisor = 1, @@ -4279,28 +4281,6 @@ void probe_bclk(void) tsc_tweak = base_hz / tsc_hz; } -/* - * SLV client has support for unique MSRs: - * - * MSR_CC6_DEMOTION_POLICY_CONFIG - * MSR_MC6_DEMOTION_POLICY_CONFIG - */ - -int has_slv_msrs(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_ATOM_SILVERMONT: - return 1; - } - return 0; -} - int is_icx(unsigned int family, unsigned int model) { @@ -5358,6 +5338,9 @@ void decode_c6_demotion_policy_msr(void) { unsigned long long msr; + 
if (!platform->has_msr_c6_demotion_policy_config) + return; + if (!get_msr(base_cpu, MSR_CC6_DEMOTION_POLICY_CONFIG, &msr)) fprintf(outf, "cpu%d: MSR_CC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-CC6-Demotion)\n", base_cpu, msr, msr & (1 << 0) ? "EN" : "DIS"); @@ -5707,7 +5690,7 @@ void process_cpuid() if (!quiet) decode_misc_pwr_mgmt_msr(); - if (!quiet && has_slv_msrs(family, model)) + if (!quiet) decode_c6_demotion_policy_msr(); rapl_probe(); From c8202a6c3acf7bbde42b5e389eec40fd8e1b8358 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 15:26:14 +0800 Subject: [PATCH 050/299] tools/power/turbostat: Abstract MSR_ATOM_PKG_C6_RESIDENCY support Abstract the support for MSR_ATOM_PKG_C6_RESIDENCY. Delete is_slm() CPU model check. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index f8d7ba87f968..a04861846d33 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -222,7 +222,6 @@ unsigned int summary_only; unsigned int list_header_only; unsigned int dump_only; unsigned int do_knl_cstates; -unsigned int do_slm_cstates; unsigned int has_aperf; unsigned int has_epb; unsigned int has_turbo; @@ -286,6 +285,7 @@ struct platform_features { bool has_msr_core_c1_res; /* MSR_CORE_C1_RES */ bool has_msr_module_c6_res_ms; /* MSR_MODULE_C6_RES_MS */ bool has_msr_c6_demotion_policy_config; /* MSR_CC6_DEMOTION_POLICY_CONFIG/MSR_MC6_DEMOTION_POLICY_CONFIG */ + bool has_msr_atom_pkg_c6_residency; /* MSR_ATOM_PKG_C6_RESIDENCY */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ int plr_msrs; /* MSR_CORE/GFX/RING_PERF_LIMIT_REASONS */ int rapl_msrs; /* RAPL PKG/DRAM/CORE/GFX MSRs, AMD RAPL MSRs */ @@ -657,6 +657,7 @@ static const struct platform_features slv_features = { .has_msr_core_c1_res = 1, .has_msr_module_c6_res_ms = 1, .has_msr_c6_demotion_policy_config = 1, + .has_msr_atom_pkg_c6_residency = 1, .trl_msrs = TRL_ATOM, .rapl_msrs = RAPL_PKG | RAPL_CORE, .has_rapl_divisor = 1, @@ -669,6 +670,7 @@ static const struct platform_features slvd_features = { .bclk_freq = BCLK_SLV, .supported_cstates = CC1 | CC6 | PC3 | PC6, .cst_limit = CST_LIMIT_SLV, + .has_msr_atom_pkg_c6_residency = 1, .trl_msrs = TRL_BASE, .rapl_msrs = RAPL_PKG | RAPL_CORE, .rapl_quirk_tdp = 30, @@ -2795,7 +2797,7 @@ retry: if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3)) return -9; if (DO_BIC(BIC_Pkgpc6)) { - if (do_slm_cstates) { + if (platform->has_msr_atom_pkg_c6_residency) { if (get_msr(cpu, MSR_ATOM_PKG_C6_RESIDENCY, &p->pc6)) return -10; } else { @@ -5125,22 +5127,6 @@ int has_skl_msrs(unsigned int family, unsigned int model) return 0; } -int is_slm(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ - case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */ - return 1; - } - return 0; -} - int is_knl(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -5684,7 +5670,6 @@ void process_cpuid() BIC_PRESENT(BIC_GFX_c0); BIC_PRESENT(BIC_CPUGFX); } - do_slm_cstates = is_slm(family, model); do_knl_cstates = is_knl(family, model); if (!quiet) From 80d132cb45f2cc171395bfaacd74567a183ab160 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 15:00:58 +0800 Subject: [PATCH 051/299] tools/power/turbostat: Abstract 
MSR_KNL_CORE_C6_RESIDENCY support Abstract the support for MSR_KNL_CORE_C6_RESIDENCY. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index a04861846d33..f0a99e092fa7 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -221,7 +221,6 @@ unsigned int rapl_joules; unsigned int summary_only; unsigned int list_header_only; unsigned int dump_only; -unsigned int do_knl_cstates; unsigned int has_aperf; unsigned int has_epb; unsigned int has_turbo; @@ -286,6 +285,7 @@ struct platform_features { bool has_msr_module_c6_res_ms; /* MSR_MODULE_C6_RES_MS */ bool has_msr_c6_demotion_policy_config; /* MSR_CC6_DEMOTION_POLICY_CONFIG/MSR_MC6_DEMOTION_POLICY_CONFIG */ bool has_msr_atom_pkg_c6_residency; /* MSR_ATOM_PKG_C6_RESIDENCY */ + bool has_msr_knl_core_c6_residency; /* MSR_KNL_CORE_C6_RESIDENCY */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ int plr_msrs; /* MSR_CORE/GFX/RING_PERF_LIMIT_REASONS */ int rapl_msrs; /* RAPL PKG/DRAM/CORE/GFX MSRs, AMD RAPL MSRs */ @@ -751,6 +751,7 @@ static const struct platform_features knl_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC6 | PC3 | PC6, .cst_limit = CST_LIMIT_KNL, + .has_msr_knl_core_c6_residency = 1, .trl_msrs = TRL_KNL, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, .has_fixed_rapl_unit = 1, @@ -2727,10 +2728,10 @@ retry: return -6; } - if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) { + if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !platform->has_msr_knl_core_c6_residency) { if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) return -7; - } else if (do_knl_cstates && soft_c1_residency_display(BIC_CPU_c6)) { + } else if (platform->has_msr_knl_core_c6_residency && soft_c1_residency_display(BIC_CPU_c6)) { if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) return -7; } @@ -5670,7 +5671,6 @@ void process_cpuid() BIC_PRESENT(BIC_GFX_c0); BIC_PRESENT(BIC_CPUGFX); } - do_knl_cstates = is_knl(family, model); if (!quiet) decode_misc_pwr_mgmt_msr(); From 58ddb691d8d8a281535406766da4e23e0f011126 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 15:07:43 +0800 Subject: [PATCH 052/299] tools/power/turbostat: Abstract extended cstate MSRs support Abstract the support for MSR_PKG_WEIGHTED_CORE_C0_RES, MSR_PKG_ANY_CORE_C0_RES, MSR_PKG_ANY_GFXE_C0_RES and MSR_PKG_BOTH_CORE_GFXE_C0_RES. Delete has_skl_msrs() CPU model check. 
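These four MSRs (0x658 through 0x65B, per the comment removed below) feed the Totl%C0, Any%C0, GFX%C0 and CPUGFX% columns. The sketch below shows how such a residency counter is conceptually turned into a percentage of the sampling interval; it is the general derivation, not turbostat's exact code path.

/*
 * Illustrative only: turn two snapshots of a residency counter into a
 * percentage of the elapsed TSC interval, the way the Totl%C0-style
 * columns are conceptually computed.
 */
#include <stdio.h>

static double residency_percent(unsigned long long res_before,
				unsigned long long res_after,
				unsigned long long tsc_before,
				unsigned long long tsc_after)
{
	unsigned long long res_delta = res_after - res_before;
	unsigned long long tsc_delta = tsc_after - tsc_before;

	return tsc_delta ? 100.0 * res_delta / tsc_delta : 0.0;
}

int main(void)
{
	/* e.g. 1.2e9 counts of weighted core C0 over a 2.0e9-cycle interval */
	printf("Totl%%C0: %.2f%%\n",
	       residency_percent(0, 1200000000ULL, 0, 2000000000ULL));
	return 0;
}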
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 29 ++++----------------------- 1 file changed, 4 insertions(+), 25 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index f0a99e092fa7..613b284f2f09 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -286,6 +286,7 @@ struct platform_features { bool has_msr_c6_demotion_policy_config; /* MSR_CC6_DEMOTION_POLICY_CONFIG/MSR_MC6_DEMOTION_POLICY_CONFIG */ bool has_msr_atom_pkg_c6_residency; /* MSR_ATOM_PKG_C6_RESIDENCY */ bool has_msr_knl_core_c6_residency; /* MSR_KNL_CORE_C6_RESIDENCY */ + bool has_ext_cst_msrs; /* MSR_PKG_WEIGHTED_CORE_C0_RES/MSR_PKG_ANY_CORE_C0_RES/MSR_PKG_ANY_GFXE_C0_RES/MSR_PKG_BOTH_CORE_GFXE_C0_RES */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ int plr_msrs; /* MSR_CORE/GFX/RING_PERF_LIMIT_REASONS */ int rapl_msrs; /* RAPL PKG/DRAM/CORE/GFX MSRs, AMD RAPL MSRs */ @@ -586,6 +587,7 @@ static const struct platform_features skl_features = { .supported_cstates = CC1 | CC3 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_HSW, .has_irtl_msrs = 1, + .has_ext_cst_msrs = 1, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, @@ -601,6 +603,7 @@ static const struct platform_features cnl_features = { .supported_cstates = CC1 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_HSW, .has_irtl_msrs = 1, + .has_ext_cst_msrs = 1, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, @@ -5104,30 +5107,6 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) return 0; } -/* - * SKL adds support for additional MSRS: - * - * MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658 - * MSR_PKG_ANY_CORE_C0_RES 0x00000659 - * MSR_PKG_ANY_GFXE_C0_RES 0x0000065A - * MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B - */ -int has_skl_msrs(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_SKYLAKE_L: /* SKL */ - case INTEL_FAM6_CANNONLAKE_L: /* CNL */ - return 1; - } - return 0; -} - int is_knl(unsigned int family, unsigned int model) { if (!genuine_intel) @@ -5665,7 +5644,7 @@ void process_cpuid() if (platform->has_msr_module_c6_res_ms) BIC_PRESENT(BIC_Mod_c6); - if (has_skl_msrs(family, model)) { + if (platform->has_ext_cst_msrs) { BIC_PRESENT(BIC_Totl_c0); BIC_PRESENT(BIC_Any_c0); BIC_PRESENT(BIC_GFX_c0); From ed43247b15a4fb4df01c3408fcab6e206f93ab87 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 31 Aug 2023 15:49:10 +0800 Subject: [PATCH 053/299] tools/power/turbostat: Abstract aperf/mperf multiplier support Abstract aperf/mperf multiplier support. Delete is_knl() CPU model check. 
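Only knl_features sets .need_perf_multiplier = 1, so the multiplier is now taken straight from the platform feature table (as in the hunk below): KNL keeps its 1024x APERF/MPERF scaling and every other platform uses 1.

	if (has_aperf)
		aperf_mperf_multiplier = platform->need_perf_multiplier ? 1024 : 1;
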
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 26 +++----------------------- 1 file changed, 3 insertions(+), 23 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 613b284f2f09..65a507d82fc4 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -296,6 +296,7 @@ struct platform_features { int rapl_quirk_tdp; /* Hardcoded TDP value when cannot be retrieved from hardware */ int tcc_offset_bits; /* TCC Offset bits in MSR_IA32_TEMPERATURE_TARGET */ bool enable_tsc_tweak; /* Use CPU Base freq instead of TSC freq for aperf/mperf counter */ + bool need_perf_multiplier; /* mperf/aperf multiplier */ }; struct platform_data { @@ -758,6 +759,7 @@ static const struct platform_features knl_features = { .trl_msrs = TRL_KNL, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, .has_fixed_rapl_unit = 1, + .need_perf_multiplier = 1, }; static const struct platform_features default_features = { @@ -5107,28 +5109,6 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) return 0; } -int is_knl(unsigned int family, unsigned int model) -{ - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ - return 1; - } - return 0; -} - -unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model) -{ - if (is_knl(family, model)) - return 1024; - return 1; -} - int get_cpu_type(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned int eax, ebx, ecx, edx; @@ -5630,7 +5610,7 @@ void process_cpuid() } if (has_aperf) - aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model); + aperf_mperf_multiplier = platform->need_perf_multiplier ? 1024 : 1; BIC_PRESENT(BIC_IRQ); BIC_PRESENT(BIC_TSC_MHz); From 7d0ebe6f7eaf8c0ac069ddab1fe3793401139fb3 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 31 Aug 2023 15:57:44 +0800 Subject: [PATCH 054/299] tools/power/turbostat: Abstract cstate prewake bit support Abstract cstate prewake bit support. Delete is_icx()/is_spr() CPU model checks. 
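ICX and SPR set .has_cst_prewake_bit = 1 in their feature tables, and the MSR_IA32_POWER_CTL dump now tests the flag instead of a model-derived global (as in the hunk below):

	/* C-state Pre-wake Disable (CSTATE_PREWAKE_DISABLE) */
	if (platform->has_cst_prewake_bit)
		fprintf(outf, "C-state Pre-wake: %sabled\n",
			msr & 0x40000000 ? "DIS" : "EN");
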
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 47 +++------------------------ 1 file changed, 4 insertions(+), 43 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 65a507d82fc4..580cc2a3b947 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -252,7 +252,6 @@ unsigned int tj_max_override; double rapl_power_units, rapl_time_units; double rapl_dram_energy_units, rapl_energy_units; double rapl_joule_counter_range; -unsigned int dis_cstate_prewake; unsigned int crystal_hz; unsigned long long tsc_hz; int base_cpu; @@ -287,6 +286,7 @@ struct platform_features { bool has_msr_atom_pkg_c6_residency; /* MSR_ATOM_PKG_C6_RESIDENCY */ bool has_msr_knl_core_c6_residency; /* MSR_KNL_CORE_C6_RESIDENCY */ bool has_ext_cst_msrs; /* MSR_PKG_WEIGHTED_CORE_C0_RES/MSR_PKG_ANY_CORE_C0_RES/MSR_PKG_ANY_GFXE_C0_RES/MSR_PKG_BOTH_CORE_GFXE_C0_RES */ + bool has_cst_prewake_bit; /* Cstate prewake bit in MSR_IA32_POWER_CTL */ int trl_msrs; /* MSR_TURBO_RATIO_LIMIT/LIMIT1/LIMIT2/SECONDARY, Atom TRL MSRs */ int plr_msrs; /* MSR_CORE/GFX/RING_PERF_LIMIT_REASONS */ int rapl_msrs; /* RAPL PKG/DRAM/CORE/GFX MSRs, AMD RAPL MSRs */ @@ -635,6 +635,7 @@ static const struct platform_features icx_features = { .supported_cstates = CC1 | CC6 | PC2 | PC6, .cst_limit = CST_LIMIT_ICX, .has_irtl_msrs = 1, + .has_cst_prewake_bit = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, .has_fixed_rapl_unit = 1, @@ -649,6 +650,7 @@ static const struct platform_features spr_features = { .supported_cstates = CC1 | CC6 | PC2 | PC6, .cst_limit = CST_LIMIT_SKX, .has_irtl_msrs = 1, + .has_cst_prewake_bit = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, }; @@ -3014,8 +3016,6 @@ void probe_cst_limit(void) pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; } -void prewake_cstate_probe(unsigned int family, unsigned int model); - static void dump_platform_info(void) { unsigned long long msr; @@ -3036,7 +3036,7 @@ static void dump_platform_info(void) base_cpu, msr, msr & 0x2 ? "EN" : "DIS"); /* C-state Pre-wake Disable (CSTATE_PREWAKE_DISABLE) */ - if (dis_cstate_prewake) + if (platform->has_cst_prewake_bit) fprintf(outf, "C-state Pre-wake: %sabled\n", msr & 0x40000000 ? 
"DIS" : "EN"); return; @@ -4289,38 +4289,6 @@ void probe_bclk(void) tsc_tweak = base_hz / tsc_hz; } -int is_icx(unsigned int family, unsigned int model) -{ - - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_ICELAKE_X: - return 1; - } - return 0; -} - -int is_spr(unsigned int family, unsigned int model) -{ - - if (!genuine_intel) - return 0; - - if (family != 6) - return 0; - - switch (model) { - case INTEL_FAM6_SAPPHIRERAPIDS_X: - return 1; - } - return 0; -} - static void remove_underbar(char *s) { char *to = s; @@ -4910,12 +4878,6 @@ void rapl_probe(void) rapl_probe_amd(); } -void prewake_cstate_probe(unsigned int family, unsigned int model) -{ - if (is_icx(family, model) || is_spr(family, model)) - dis_cstate_prewake = 1; -} - int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; @@ -5638,7 +5600,6 @@ void process_cpuid() decode_c6_demotion_policy_msr(); rapl_probe(); - prewake_cstate_probe(family, model); if (!quiet) dump_cstate_pstate_config_info(); From d085b3b0f11af7d23601b832e0d9e446d18df968 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 21:53:21 +0800 Subject: [PATCH 055/299] tools/power/turbostat: Delete intel_model_duplicates() Now CPU model checks have been cleaned up, no code depends on the duplicated CPU model value. Delete intel_model_duplicates(). Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 59 --------------------------- 1 file changed, 59 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 580cc2a3b947..89f53e1ac63a 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -5258,63 +5258,6 @@ void decode_c6_demotion_policy_msr(void) base_cpu, msr, msr & (1 << 0) ? 
"EN" : "DIS"); } -/* - * When models are the same, for the purpose of turbostat, reuse - */ -unsigned int intel_model_duplicates(unsigned int model) -{ - - switch (model) { - case INTEL_FAM6_NEHALEM_EP: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */ - case INTEL_FAM6_NEHALEM_G: /* Core i7 and i5 Processor - Nehalem */ - case INTEL_FAM6_WESTMERE: /* Westmere Client - Clarkdale, Arrandale */ - case INTEL_FAM6_WESTMERE_EP: /* Westmere EP - Gulftown */ - return INTEL_FAM6_NEHALEM; - - case INTEL_FAM6_WESTMERE_EX: /* Westmere-EX Xeon - Eagleton */ - return INTEL_FAM6_NEHALEM_EX; - - case INTEL_FAM6_XEON_PHI_KNM: - return INTEL_FAM6_XEON_PHI_KNL; - - case INTEL_FAM6_BROADWELL_D: /* BDX-DE */ - return INTEL_FAM6_BROADWELL_X; - - case INTEL_FAM6_SKYLAKE: - case INTEL_FAM6_KABYLAKE_L: - case INTEL_FAM6_KABYLAKE: - case INTEL_FAM6_COMETLAKE_L: - case INTEL_FAM6_COMETLAKE: - return INTEL_FAM6_SKYLAKE_L; - - case INTEL_FAM6_ICELAKE_L: - case INTEL_FAM6_ICELAKE_NNPI: - case INTEL_FAM6_TIGERLAKE_L: - case INTEL_FAM6_TIGERLAKE: - case INTEL_FAM6_ROCKETLAKE: - case INTEL_FAM6_LAKEFIELD: - case INTEL_FAM6_ALDERLAKE: - case INTEL_FAM6_ALDERLAKE_L: - case INTEL_FAM6_ATOM_GRACEMONT: - case INTEL_FAM6_RAPTORLAKE: - case INTEL_FAM6_RAPTORLAKE_P: - case INTEL_FAM6_RAPTORLAKE_S: - case INTEL_FAM6_METEORLAKE: - case INTEL_FAM6_METEORLAKE_L: - return INTEL_FAM6_CANNONLAKE_L; - - case INTEL_FAM6_ATOM_TREMONT_L: - return INTEL_FAM6_ATOM_TREMONT; - - case INTEL_FAM6_ICELAKE_D: - return INTEL_FAM6_ICELAKE_X; - - case INTEL_FAM6_EMERALDRAPIDS_X: - return INTEL_FAM6_SAPPHIRERAPIDS_X; - } - return model; -} - void print_dev_latency(void) { char *path = "/dev/cpu_dma_latency"; @@ -5457,8 +5400,6 @@ void process_cpuid() } probe_platform_features(family, model); - if (genuine_intel) - model = intel_model_duplicates(model); if (!(edx_flags & (1 << 5))) errx(1, "CPUID: no MSR"); From 32e8c6169af7ef7c938b1bd996d27ab171e27d80 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 31 Aug 2023 14:44:06 +0800 Subject: [PATCH 056/299] tools/power/turbostat: Improve probe_platform_features() logic AMD/Hygon platforms that don't have RAPL use 'amd_features' to describe the platform features. Unknown Intel platforms use 'default_features' to describe the platform features. As none of the platform feature is set for 'amd_features' or 'default_features', there is no need to maintain both of them. Remove 'amd_features' structure and improve the logic in probe_platform_features(). 
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 89f53e1ac63a..102ba515cf4b 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -767,9 +767,6 @@ static const struct platform_features knl_features = { static const struct platform_features default_features = { }; -static const struct platform_features amd_features = { -}; - static const struct platform_features amd_features_with_rapl = { .rapl_msrs = RAPL_AMD_F17H, .has_per_core_rapl = 1, @@ -849,9 +846,9 @@ void probe_platform_features(unsigned int family, unsigned int model) { int i; - if (authentic_amd || hygon_genuine) { - platform = &amd_features; + platform = &default_features; + if (authentic_amd || hygon_genuine) { if (max_extended_level >= 0x80000007) { unsigned int eax, ebx, ecx, edx; @@ -863,8 +860,6 @@ void probe_platform_features(unsigned int family, unsigned int model) return; } - platform = &default_features; - if (!genuine_intel || family != 6) return; From 045acf6064c5567011163e97c28906e0a7791414 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Tue, 29 Aug 2023 13:47:58 +0800 Subject: [PATCH 057/299] tools/power/turbostat: Relocate cstate probing code Move all cstate probing related code into probe_cstates(). Note that dump_platform_info() actually dumps both MSR_PLATFORM_INFO and MSR_IA32_POWER_CTL. MSR_PLATFORM_INFO is for pstate and MSR_IA32_POWER_CTL is for cstate. So split dump_platform_info() and dump MSR_IA32_POWER_CTL in probe_cstates(). Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 50 +++++++++++++++++---------- 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 102ba515cf4b..e5ca1586961e 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -272,7 +272,7 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr); struct platform_features { bool has_msr_misc_feature_control; /* MSR_MISC_FEATURE_CONTROL */ bool has_msr_misc_pwr_mgmt; /* MSR_MISC_PWR_MGMT */ - bool has_nhm_msrs; /* MSR_PLATFORM_INFO, MSR_IA32_TEMPERATURE_TARGET, MSR_SMI_COUNT, MSR_PKG_CST_CONFIG_CONTROL, TRL MSRs */ + bool has_nhm_msrs; /* MSR_PLATFORM_INFO, MSR_IA32_TEMPERATURE_TARGET, MSR_SMI_COUNT, MSR_PKG_CST_CONFIG_CONTROL, MSR_IA32_POWER_CTL, TRL MSRs */ bool has_config_tdp; /* MSR_CONFIG_TDP_NOMINAL/LEVEL_1/LEVEL_2/CONTROL, MSR_TURBO_ACTIVATION_RATIO */ int bclk_freq; /* CPU base clock */ int crystal_freq; /* Crystal clock to use when not available from CPUID.15 */ @@ -3025,6 +3025,14 @@ static void dump_platform_info(void) ratio = (msr >> 8) & 0xFF; fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n", ratio, bclk, ratio * bclk); +} + +static void dump_power_ctl(void) +{ + unsigned long long msr; + + if (!platform->has_nhm_msrs) + return; get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr); fprintf(outf, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", @@ -3229,6 +3237,9 @@ static void dump_cst_cfg(void) { unsigned long long msr; + if (!platform->has_nhm_msrs) + return; + get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr); fprintf(outf, "cpu%d: MSR_PKG_CST_CONFIG_CONTROL: 0x%08llx", base_cpu, msr); @@ -4332,7 +4343,6 @@ static void dump_cstate_pstate_config_info(void) dump_platform_info(); 
dump_turbo_ratio_info(); - dump_cst_cfg(); } static int read_sysfs_int(char *path) @@ -5332,6 +5342,25 @@ void probe_cstates(void) if (platform->supported_cstates & PC10 && (pkg_cstate_limit >= PCL_10)) BIC_PRESENT(BIC_Pkgpc10); + + if (platform->has_msr_module_c6_res_ms) + BIC_PRESENT(BIC_Mod_c6); + + if (platform->has_ext_cst_msrs) { + BIC_PRESENT(BIC_Totl_c0); + BIC_PRESENT(BIC_Any_c0); + BIC_PRESENT(BIC_GFX_c0); + BIC_PRESENT(BIC_CPUGFX); + } + + if (quiet) + return; + + dump_power_ctl(); + dump_cst_cfg(); + decode_c6_demotion_policy_msr(); + print_dev_latency(); + dump_sysfs_cstate_config(); } void process_cpuid() @@ -5519,32 +5548,15 @@ void process_cpuid() BIC_PRESENT(BIC_SMI); probe_bclk(); - if (platform->has_msr_module_c6_res_ms) - BIC_PRESENT(BIC_Mod_c6); - - if (platform->has_ext_cst_msrs) { - BIC_PRESENT(BIC_Totl_c0); - BIC_PRESENT(BIC_Any_c0); - BIC_PRESENT(BIC_GFX_c0); - BIC_PRESENT(BIC_CPUGFX); - } - if (!quiet) decode_misc_pwr_mgmt_msr(); - if (!quiet) - decode_c6_demotion_policy_msr(); - rapl_probe(); if (!quiet) dump_cstate_pstate_config_info(); intel_uncore_frequency_probe(); - if (!quiet) - print_dev_latency(); - if (!quiet) - dump_sysfs_cstate_config(); if (!quiet) dump_sysfs_pstate_config(); From 11cd9a09f3e827605cf8fc7c343ddb8c9f4ee95d Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 31 Aug 2023 15:58:10 +0800 Subject: [PATCH 058/299] tools/power/turbostat: Relocate pstate probing code Introduce probe_pstates() and move all pstate probing related code into it. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 39 ++++++++++++++------------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index e5ca1586961e..1bcaee6d0ead 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -3016,6 +3016,9 @@ static void dump_platform_info(void) unsigned long long msr; unsigned int ratio; + if (!platform->has_nhm_msrs) + return; + get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); fprintf(outf, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); @@ -4313,6 +4316,9 @@ static void dump_turbo_ratio_info(void) if (!has_turbo) return; + if (!platform->has_nhm_msrs) + return; + if (platform->trl_msrs & TRL_LIMIT2) dump_turbo_ratio_limit2(); @@ -4336,15 +4342,6 @@ static void dump_turbo_ratio_info(void) dump_config_tdp(); } -static void dump_cstate_pstate_config_info(void) -{ - if (!platform->has_nhm_msrs) - return; - - dump_platform_info(); - dump_turbo_ratio_info(); -} - static int read_sysfs_int(char *path) { FILE *input; @@ -5363,6 +5360,19 @@ void probe_cstates(void) dump_sysfs_cstate_config(); } +void probe_pstates(void) +{ + probe_bclk(); + + if (quiet) + return; + + dump_platform_info(); + dump_turbo_ratio_info(); + dump_sysfs_pstate_config(); + decode_misc_pwr_mgmt_msr(); +} + void process_cpuid() { unsigned int eax, ebx, ecx, edx; @@ -5542,24 +5552,17 @@ void process_cpuid() BIC_PRESENT(BIC_IRQ); BIC_PRESENT(BIC_TSC_MHz); + probe_pstates(); + probe_cstates(); if (platform->has_nhm_msrs) BIC_PRESENT(BIC_SMI); - probe_bclk(); - - if (!quiet) - decode_misc_pwr_mgmt_msr(); rapl_probe(); - if (!quiet) - dump_cstate_pstate_config_info(); intel_uncore_frequency_probe(); - if (!quiet) - dump_sysfs_pstate_config(); - if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK)) BIC_PRESENT(BIC_GFX_rc6); From 622c8f23556266a5afdd657aa0518b7d70d5dfc7 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 31 Aug 2023 
16:00:15 +0800 Subject: [PATCH 059/299] tools/power/turbostat: Rename uncore probing function Rename intel_uncore_frequency_probe() to probe_intel_uncore_frequency() to be consistent with other probing function names. Probe uncore frequency right after probing cstates. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 1bcaee6d0ead..a956a30d81de 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -4378,7 +4378,7 @@ static void dump_sysfs_file(char *path) fprintf(outf, "%s: %s", strrchr(path, '/') + 1, cpuidle_buf); } -static void intel_uncore_frequency_probe(void) +static void probe_intel_uncore_frequency(void) { int i, j; char path[128]; @@ -5556,13 +5556,13 @@ void process_cpuid() probe_cstates(); + probe_intel_uncore_frequency(); + if (platform->has_nhm_msrs) BIC_PRESENT(BIC_SMI); rapl_probe(); - intel_uncore_frequency_probe(); - if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK)) BIC_PRESENT(BIC_GFX_rc6); From 6cb13609a07ba466b4613fdce7da8c98508069b7 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 31 Aug 2023 16:00:33 +0800 Subject: [PATCH 060/299] tools/power/turbostat: Rename rapl probing function Rename rapl_probe() to probe_rapl() to be consistent with other probing function names. Probe rapl after probing uncore frequency. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index a956a30d81de..bf2b1d1b2627 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -4865,11 +4865,11 @@ void rapl_probe_amd(void) } /* - * rapl_probe() + * probe_rapl() * * sets rapl_power_units, rapl_energy_units, rapl_time_units */ -void rapl_probe(void) +void probe_rapl(void) { if (!platform->rapl_msrs) return; @@ -5558,11 +5558,11 @@ void process_cpuid() probe_intel_uncore_frequency(); + probe_rapl(); + if (platform->has_nhm_msrs) BIC_PRESENT(BIC_SMI); - rapl_probe(); - if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK)) BIC_PRESENT(BIC_GFX_rc6); From 2538d1673d02f66f6bdf01eebf36e271228778e9 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 31 Aug 2023 16:01:12 +0800 Subject: [PATCH 061/299] tools/power/turbostat: Relocate graphics probing code Introduce probe_graphics(), and move all graphics probing related code into it. 
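The checks themselves are unchanged; probe_graphics() simply groups the sysfs access() probes that decide which graphics columns can be shown, e.g. (from the hunk below):

	if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK))
		BIC_PRESENT(BIC_GFX_rc6);
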
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index bf2b1d1b2627..feff9ecff368 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -4424,6 +4424,20 @@ static void probe_intel_uncore_frequency(void) } } +static void probe_graphics(void) +{ + if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK)) + BIC_PRESENT(BIC_GFX_rc6); + + if (!access("/sys/class/drm/card0/gt_cur_freq_mhz", R_OK) || + !access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK)) + BIC_PRESENT(BIC_GFXMHz); + + if (!access("/sys/class/drm/card0/gt_act_freq_mhz", R_OK) || + !access("/sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz", R_OK)) + BIC_PRESENT(BIC_GFXACTMHz); +} + static void dump_sysfs_cstate_config(void) { char path[64]; @@ -5558,22 +5572,13 @@ void process_cpuid() probe_intel_uncore_frequency(); + probe_graphics(); + probe_rapl(); if (platform->has_nhm_msrs) BIC_PRESENT(BIC_SMI); - if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK)) - BIC_PRESENT(BIC_GFX_rc6); - - if (!access("/sys/class/drm/card0/gt_cur_freq_mhz", R_OK) || - !access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK)) - BIC_PRESENT(BIC_GFXMHz); - - if (!access("/sys/class/drm/card0/gt_act_freq_mhz", R_OK) || - !access("/sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz", R_OK)) - BIC_PRESENT(BIC_GFXACTMHz); - if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", R_OK)) BIC_PRESENT(BIC_CPU_LPI); else From e7d7b82de192464b733fb2bcc9e305ea6f6ea47e Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 31 Aug 2023 16:01:36 +0800 Subject: [PATCH 062/299] tools/power/turbostat: Relocate lpi probing code Introduce probe_lpi(), and move all lpi probing related code into it. 
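probe_lpi() keeps the existing sysfs-then-debugfs fallback for the system LPI counter (from the hunk below):

	if (!access(sys_lpi_file_sysfs, R_OK)) {
		sys_lpi_file = sys_lpi_file_sysfs;
		BIC_PRESENT(BIC_SYS_LPI);
	} else if (!access(sys_lpi_file_debugfs, R_OK)) {
		sys_lpi_file = sys_lpi_file_debugfs;
		BIC_PRESENT(BIC_SYS_LPI);
	} else {
		sys_lpi_file_sysfs = NULL;
		BIC_NOT_PRESENT(BIC_SYS_LPI);
	}
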
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 38 ++++++++++++++++----------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index feff9ecff368..ad9147757d5a 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -5374,6 +5374,26 @@ void probe_cstates(void) dump_sysfs_cstate_config(); } +void probe_lpi(void) +{ + if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", R_OK)) + BIC_PRESENT(BIC_CPU_LPI); + else + BIC_NOT_PRESENT(BIC_CPU_LPI); + + if (!access(sys_lpi_file_sysfs, R_OK)) { + sys_lpi_file = sys_lpi_file_sysfs; + BIC_PRESENT(BIC_SYS_LPI); + } else if (!access(sys_lpi_file_debugfs, R_OK)) { + sys_lpi_file = sys_lpi_file_debugfs; + BIC_PRESENT(BIC_SYS_LPI); + } else { + sys_lpi_file_sysfs = NULL; + BIC_NOT_PRESENT(BIC_SYS_LPI); + } + +} + void probe_pstates(void) { probe_bclk(); @@ -5570,6 +5590,8 @@ void process_cpuid() probe_cstates(); + probe_lpi(); + probe_intel_uncore_frequency(); probe_graphics(); @@ -5579,27 +5601,11 @@ void process_cpuid() if (platform->has_nhm_msrs) BIC_PRESENT(BIC_SMI); - if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", R_OK)) - BIC_PRESENT(BIC_CPU_LPI); - else - BIC_NOT_PRESENT(BIC_CPU_LPI); - if (!access("/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count", R_OK)) BIC_PRESENT(BIC_CORE_THROT_CNT); else BIC_NOT_PRESENT(BIC_CORE_THROT_CNT); - if (!access(sys_lpi_file_sysfs, R_OK)) { - sys_lpi_file = sys_lpi_file_sysfs; - BIC_PRESENT(BIC_SYS_LPI); - } else if (!access(sys_lpi_file_debugfs, R_OK)) { - sys_lpi_file = sys_lpi_file_debugfs; - BIC_PRESENT(BIC_SYS_LPI); - } else { - sys_lpi_file_sysfs = NULL; - BIC_NOT_PRESENT(BIC_SYS_LPI); - } - if (!quiet) decode_misc_feature_control(); From db735f8ba78bf7692579b78f0ca1e40563ef79fd Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 31 Aug 2023 16:02:16 +0800 Subject: [PATCH 063/299] tools/power/turbostat: Relocate thermal probing code Introduce probe_thermal(), and move all thermal probing related code into it. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index ad9147757d5a..8dae576234a4 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -4894,6 +4894,14 @@ void probe_rapl(void) rapl_probe_amd(); } +void probe_thermal(void) +{ + if (!access("/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count", R_OK)) + BIC_PRESENT(BIC_CORE_THROT_CNT); + else + BIC_NOT_PRESENT(BIC_CORE_THROT_CNT); +} + int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; @@ -5598,14 +5606,11 @@ void process_cpuid() probe_rapl(); + probe_thermal(); + if (platform->has_nhm_msrs) BIC_PRESENT(BIC_SMI); - if (!access("/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count", R_OK)) - BIC_PRESENT(BIC_CORE_THROT_CNT); - else - BIC_NOT_PRESENT(BIC_CORE_THROT_CNT); - if (!quiet) decode_misc_feature_control(); From ce7a32c2a4cdba06ed5ead4ed8f980c893e20cd2 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Wed, 30 Aug 2023 16:36:06 +0800 Subject: [PATCH 064/299] tools/power/turbostat: Reorder some functions Reorder some functions to solve code depdency introduced by next patch. 
No functional change. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 214 +++++++++++++------------- 1 file changed, 107 insertions(+), 107 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 8dae576234a4..eb333612bdec 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -4878,92 +4878,6 @@ void rapl_probe_amd(void) fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); } -/* - * probe_rapl() - * - * sets rapl_power_units, rapl_energy_units, rapl_time_units - */ -void probe_rapl(void) -{ - if (!platform->rapl_msrs) - return; - - if (genuine_intel) - rapl_probe_intel(); - if (authentic_amd || hygon_genuine) - rapl_probe_amd(); -} - -void probe_thermal(void) -{ - if (!access("/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count", R_OK)) - BIC_PRESENT(BIC_CORE_THROT_CNT); - else - BIC_NOT_PRESENT(BIC_CORE_THROT_CNT); -} - -int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) -{ - unsigned long long msr; - unsigned int dts, dts2; - int cpu; - - UNUSED(c); - UNUSED(p); - - if (!(do_dts || do_ptm)) - return 0; - - cpu = t->cpu_id; - - /* DTS is per-core, no need to print for each thread */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) - return 0; - - if (cpu_migrate(cpu)) { - fprintf(outf, "print_thermal: Could not migrate to CPU %d\n", cpu); - return -1; - } - - if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) { - if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) - return 0; - - dts = (msr >> 16) & 0x7F; - fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", cpu, msr, tj_max - dts); - - if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr)) - return 0; - - dts = (msr >> 16) & 0x7F; - dts2 = (msr >> 8) & 0x7F; - fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", - cpu, msr, tj_max - dts, tj_max - dts2); - } - - if (do_dts && debug) { - unsigned int resolution; - - if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) - return 0; - - dts = (msr >> 16) & 0x7F; - resolution = (msr >> 27) & 0xF; - fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", - cpu, msr, tj_max - dts, resolution); - - if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr)) - return 0; - - dts = (msr >> 16) & 0x7F; - dts2 = (msr >> 8) & 0x7F; - fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", - cpu, msr, tj_max - dts, tj_max - dts2); - } - - return 0; -} - void print_power_limit_msr(int cpu, unsigned long long msr, char *label) { fprintf(outf, "cpu%d: %s: %sabled (%0.3f Watts, %f sec, clamp %sabled)\n", @@ -5095,29 +5009,20 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) return 0; } -int get_cpu_type(struct thread_data *t, struct core_data *c, struct pkg_data *p) +/* + * probe_rapl() + * + * sets rapl_power_units, rapl_energy_units, rapl_time_units + */ +void probe_rapl(void) { - unsigned int eax, ebx, ecx, edx; + if (!platform->rapl_msrs) + return; - UNUSED(c); - UNUSED(p); - - if (!genuine_intel) - return 0; - - if (cpu_migrate(t->cpu_id)) { - fprintf(outf, "Could not migrate to CPU %d\n", t->cpu_id); - return -1; - } - - if (max_level < 0x1a) - return 0; - - __cpuid(0x1a, eax, ebx, ecx, edx); - eax = (eax >> 24) & 0xFF; - if (eax == 0x20) - t->is_atom = true; - return 0; + if (genuine_intel) + rapl_probe_intel(); + if (authentic_amd || hygon_genuine) + 
rapl_probe_amd(); } /* @@ -5200,6 +5105,101 @@ guess: return 0; } +int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) +{ + unsigned long long msr; + unsigned int dts, dts2; + int cpu; + + UNUSED(c); + UNUSED(p); + + if (!(do_dts || do_ptm)) + return 0; + + cpu = t->cpu_id; + + /* DTS is per-core, no need to print for each thread */ + if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) + return 0; + + if (cpu_migrate(cpu)) { + fprintf(outf, "print_thermal: Could not migrate to CPU %d\n", cpu); + return -1; + } + + if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) { + if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) + return 0; + + dts = (msr >> 16) & 0x7F; + fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", cpu, msr, tj_max - dts); + + if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr)) + return 0; + + dts = (msr >> 16) & 0x7F; + dts2 = (msr >> 8) & 0x7F; + fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", + cpu, msr, tj_max - dts, tj_max - dts2); + } + + if (do_dts && debug) { + unsigned int resolution; + + if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) + return 0; + + dts = (msr >> 16) & 0x7F; + resolution = (msr >> 27) & 0xF; + fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", + cpu, msr, tj_max - dts, resolution); + + if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr)) + return 0; + + dts = (msr >> 16) & 0x7F; + dts2 = (msr >> 8) & 0x7F; + fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", + cpu, msr, tj_max - dts, tj_max - dts2); + } + + return 0; +} + +void probe_thermal(void) +{ + if (!access("/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count", R_OK)) + BIC_PRESENT(BIC_CORE_THROT_CNT); + else + BIC_NOT_PRESENT(BIC_CORE_THROT_CNT); +} + +int get_cpu_type(struct thread_data *t, struct core_data *c, struct pkg_data *p) +{ + unsigned int eax, ebx, ecx, edx; + + UNUSED(c); + UNUSED(p); + + if (!genuine_intel) + return 0; + + if (cpu_migrate(t->cpu_id)) { + fprintf(outf, "Could not migrate to CPU %d\n", t->cpu_id); + return -1; + } + + if (max_level < 0x1a) + return 0; + + __cpuid(0x1a, eax, ebx, ecx, edx); + eax = (eax >> 24) & 0xFF; + if (eax == 0x20) + t->is_atom = true; + return 0; +} + void decode_feature_control_msr(void) { unsigned long long msr; From 5612b2c89bd0a3a2f7c9e8756e6ead971b03f8a3 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Wed, 30 Aug 2023 16:54:51 +0800 Subject: [PATCH 065/299] tools/power/turbostat: Relocate more probing related code Relocate more feature probing code outside of process_cpuids() into the corresponding probing functions. This improves the readability of code and the turbostat output. 
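For example, the per-CPU dumps now live with the feature they belong to; probe_pstates() picks up (from the hunk below):

	for_all_cpus(print_hwp, ODD_COUNTERS);
	for_all_cpus(print_epb, ODD_COUNTERS);
	for_all_cpus(print_perf_limit, ODD_COUNTERS);

while probe_rapl(), probe_thermal() and probe_cstates() take over print_rapl(), set_temperature_target()/print_thermal() and print_irtl() respectively, and turbostat_init() drops the matching '!quiet' calls.
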
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 37 ++++++++++++--------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index eb333612bdec..ffeee48e8d85 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -5023,6 +5023,11 @@ void probe_rapl(void) rapl_probe_intel(); if (authentic_amd || hygon_genuine) rapl_probe_amd(); + + if (quiet) + return; + + for_all_cpus(print_rapl, ODD_COUNTERS); } /* @@ -5173,6 +5178,13 @@ void probe_thermal(void) BIC_PRESENT(BIC_CORE_THROT_CNT); else BIC_NOT_PRESENT(BIC_CORE_THROT_CNT); + + for_all_cpus(set_temperature_target, ODD_COUNTERS); + + if (quiet) + return; + + for_all_cpus(print_thermal, ODD_COUNTERS); } int get_cpu_type(struct thread_data *t, struct core_data *c, struct pkg_data *p) @@ -5380,6 +5392,7 @@ void probe_cstates(void) decode_c6_demotion_policy_msr(); print_dev_latency(); dump_sysfs_cstate_config(); + print_irtl(); } void probe_lpi(void) @@ -5413,6 +5426,10 @@ void probe_pstates(void) dump_turbo_ratio_info(); dump_sysfs_pstate_config(); decode_misc_pwr_mgmt_msr(); + + for_all_cpus(print_hwp, ODD_COUNTERS); + for_all_cpus(print_epb, ODD_COUNTERS); + for_all_cpus(print_perf_limit, ODD_COUNTERS); } void process_cpuid() @@ -5897,29 +5914,9 @@ void turbostat_init() process_cpuid(); linux_perf_init(); - if (!quiet) - for_all_cpus(print_hwp, ODD_COUNTERS); - - if (!quiet) - for_all_cpus(print_epb, ODD_COUNTERS); - - if (!quiet) - for_all_cpus(print_perf_limit, ODD_COUNTERS); - - if (!quiet) - for_all_cpus(print_rapl, ODD_COUNTERS); - - for_all_cpus(set_temperature_target, ODD_COUNTERS); - for_all_cpus(get_cpu_type, ODD_COUNTERS); for_all_cpus(get_cpu_type, EVEN_COUNTERS); - if (!quiet) - for_all_cpus(print_thermal, ODD_COUNTERS); - - if (!quiet) - print_irtl(); - if (DO_BIC(BIC_IPC)) (void)get_instr_count_fd(base_cpu); } From 7ee39d8d593e3d28eced0fa1a8c8c6bdcbd4156e Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Wed, 13 Sep 2023 23:32:39 +0800 Subject: [PATCH 066/299] tools/power/turbostat: Introduce probe_pm_features() Feature probe has nothing to do with CPUID, thus it should not be in process_cpuids(). Introduce probe_pm_features() and move all feature probing functions into it. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index ffeee48e8d85..607152b36c1a 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -5610,7 +5610,10 @@ void process_cpuid() BIC_PRESENT(BIC_IRQ); BIC_PRESENT(BIC_TSC_MHz); +} +void probe_pm_features(void) +{ probe_pstates(); probe_cstates(); @@ -5630,8 +5633,6 @@ void process_cpuid() if (!quiet) decode_misc_feature_control(); - - return; } /* @@ -5912,6 +5913,7 @@ void turbostat_init() check_dev_msr(); check_permissions(); process_cpuid(); + probe_pm_features(); linux_perf_init(); for_all_cpus(get_cpu_type, ODD_COUNTERS); From 05ad96ff0fb9d1b16abb5022b9c62636c6780fc2 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 27 Aug 2023 15:33:27 +0800 Subject: [PATCH 067/299] tools/power/turbostat: Enable MSR_CORE_C1_RES on recent Intel client platforms All recent Intel client platforms have MSR_CORE_C1_RES. Enable the support on these platforms, including CNL/ICL/LKF/RKL/TGL/ADL/RPL/MTL. 
Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 607152b36c1a..9895f348b637 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -604,6 +604,7 @@ static const struct platform_features cnl_features = { .supported_cstates = CC1 | CC6 | CC7 | PC2 | PC3 | PC6 | PC7 | PC8 | PC9 | PC10, .cst_limit = CST_LIMIT_HSW, .has_irtl_msrs = 1, + .has_msr_core_c1_res = 1, .has_ext_cst_msrs = 1, .trl_msrs = TRL_BASE, .tcc_offset_bits = 6, From 6b74a30b767e362eda7deeac52edcd546c5f6d8f Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sat, 9 Sep 2023 13:26:51 +0800 Subject: [PATCH 068/299] tools/power/turbostat: Remove PC7/PC9 support on ADL/RPL Compared with other platforms that share cnl_features, ADL/RPL don't have PC7/PC9. Clone a new platform feature set from cnl_features for ADL/RPL, with PC7/PC9 removed. Signed-off-by: Zhang Rui Reviewed-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 29 +++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 9895f348b637..a769daa59b12 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -612,6 +612,23 @@ static const struct platform_features cnl_features = { .enable_tsc_tweak = 1, }; +static const struct platform_features adl_features = { + .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, + .has_config_tdp = 1, + .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC6 | CC7 | PC2 | PC3 | PC6 | PC8 | PC10, + .cst_limit = CST_LIMIT_HSW, + .has_irtl_msrs = 1, + .has_msr_core_c1_res = 1, + .has_ext_cst_msrs = 1, + .trl_msrs = TRL_BASE, + .tcc_offset_bits = 6, + .rapl_msrs = RAPL_PKG_ALL | RAPL_CORE_ALL | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_GFX, + .enable_tsc_tweak = 1, +}; + static const struct platform_features skx_features = { .has_msr_misc_feature_control = 1, .has_msr_misc_pwr_mgmt = 1, @@ -812,11 +829,11 @@ static const struct platform_data turbostat_pdata[] = { { INTEL_FAM6_SAPPHIRERAPIDS_X, &spr_features }, { INTEL_FAM6_EMERALDRAPIDS_X, &spr_features }, { INTEL_FAM6_LAKEFIELD, &cnl_features }, - { INTEL_FAM6_ALDERLAKE, &cnl_features }, - { INTEL_FAM6_ALDERLAKE_L, &cnl_features }, - { INTEL_FAM6_RAPTORLAKE, &cnl_features }, - { INTEL_FAM6_RAPTORLAKE_P, &cnl_features }, - { INTEL_FAM6_RAPTORLAKE_S, &cnl_features }, + { INTEL_FAM6_ALDERLAKE, &adl_features }, + { INTEL_FAM6_ALDERLAKE_L, &adl_features }, + { INTEL_FAM6_RAPTORLAKE, &adl_features }, + { INTEL_FAM6_RAPTORLAKE_P, &adl_features }, + { INTEL_FAM6_RAPTORLAKE_S, &adl_features }, { INTEL_FAM6_METEORLAKE, &cnl_features }, { INTEL_FAM6_METEORLAKE_L, &cnl_features }, { INTEL_FAM6_ATOM_SILVERMONT, &slv_features }, @@ -828,7 +845,7 @@ static const struct platform_data turbostat_pdata[] = { { INTEL_FAM6_ATOM_TREMONT_D, &tmtd_features }, { INTEL_FAM6_ATOM_TREMONT, &tmt_features }, { INTEL_FAM6_ATOM_TREMONT_L, &tmt_features }, - { INTEL_FAM6_ATOM_GRACEMONT, &cnl_features }, + { INTEL_FAM6_ATOM_GRACEMONT, &adl_features }, { INTEL_FAM6_XEON_PHI_KNL, &knl_features }, { INTEL_FAM6_XEON_PHI_KNM, &knl_features }, /* From 71cfd1da9f0635ccd7124ad8b67b9aed596be491 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 6 Oct 2023 11:01:17 +0800 Subject: [PATCH 069/299] tools/power/turbostat: Introduce 
cpu_allowed_set Turbostat supports "-c" parameter which limits output to system summary plus the specified cpu-set. But some code still uses cpu_present_set to read and dump the counters. Introduce cpu_allowed_set for code that should obey the specified cpu-set. No functional change. Signed-off-by: Zhang Rui --- tools/power/x86/turbostat/turbostat.c | 31 ++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index a769daa59b12..d8f44ea5b4bf 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -904,8 +904,8 @@ int backwards_count; char *progname; #define CPU_SUBSET_MAXCPUS 1024 /* need to use before probe... */ -cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_subset; -size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size; +cpu_set_t *cpu_present_set, *cpu_allowed_set, *cpu_affinity_set, *cpu_subset; +size_t cpu_present_setsize, cpu_allowed_setsize, cpu_affinity_setsize, cpu_subset_size; #define MAX_ADDED_COUNTERS 8 #define MAX_ADDED_THREAD_COUNTERS 24 #define BITMASK_SIZE 32 @@ -1157,6 +1157,11 @@ int cpu_is_not_present(int cpu) return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set); } +int cpu_is_not_allowed(int cpu) +{ + return !CPU_ISSET_S(cpu, cpu_allowed_setsize, cpu_allowed_set); +} + /* * run func(thread, core, package) in topology order * skip non-present cpus @@ -3396,6 +3401,10 @@ void free_all_buffers(void) cpu_present_set = NULL; cpu_present_setsize = 0; + CPU_FREE(cpu_allowed_set); + cpu_allowed_set = NULL; + cpu_allowed_setsize = 0; + CPU_FREE(cpu_affinity_set); cpu_affinity_set = NULL; cpu_affinity_setsize = 0; @@ -5697,13 +5706,29 @@ void topology_probe() CPU_ZERO_S(cpu_present_setsize, cpu_present_set); for_all_proc_cpus(mark_cpu_present); + /* + * Allocate and initialize cpu_allowed_set + */ + cpu_allowed_set = CPU_ALLOC((topo.max_cpu_num + 1)); + if (cpu_allowed_set == NULL) + err(3, "CPU_ALLOC"); + cpu_allowed_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); + CPU_ZERO_S(cpu_allowed_setsize, cpu_allowed_set); + /* * Validate that all cpus in cpu_subset are also in cpu_present_set */ for (i = 0; i < CPU_SUBSET_MAXCPUS; ++i) { - if (CPU_ISSET_S(i, cpu_subset_size, cpu_subset)) + if (!cpu_subset) { + if (CPU_ISSET_S(i, cpu_present_setsize, cpu_present_set)) + CPU_SET_S(i, cpu_allowed_setsize, cpu_allowed_set); + continue; + } + if (CPU_ISSET_S(i, cpu_subset_size, cpu_subset)) { if (!CPU_ISSET_S(i, cpu_present_setsize, cpu_present_set)) err(1, "cpu%d not present", i); + CPU_SET_S(i, cpu_allowed_setsize, cpu_allowed_set); + } } /* From 4ede6d1ce7acba9cafe7df4e935b174623cd2181 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Wed, 4 Oct 2023 13:52:02 +0800 Subject: [PATCH 070/299] tools/power/turbostat: Obey allowed CPUs when accessing CPU counters for_all_cpus/for_all_cpus_2 are used for accessing the per CPU counters, and they should follow the cpu_allowed_set instead of cpu_present_set. 
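The iterators keep their shape; only the skip test changes, so a CPU outside the allowed set is never visited (as in the hunk below, applied to both for_all_cpus() and for_all_cpus_2()):

	if (cpu_is_not_allowed(t->cpu_id))
		continue;

Together with the cpu_allowed_set introduced by the previous patch, this lets "-c" restrict which CPUs have their counters read, not just which CPUs are shown.
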
Signed-off-by: Zhang Rui --- tools/power/x86/turbostat/turbostat.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index d8f44ea5b4bf..202cf5231d7a 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -1182,7 +1182,7 @@ int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pk t = GET_THREAD(thread_base, thread_no, core_no, node_no, pkg_no); - if (cpu_is_not_present(t->cpu_id)) + if (cpu_is_not_allowed(t->cpu_id)) continue; c = GET_CORE(core_base, core_no, node_no, pkg_no); @@ -3618,7 +3618,7 @@ int for_all_cpus_2(int (func) (struct thread_data *, struct core_data *, t = GET_THREAD(thread_base, thread_no, core_no, node_no, pkg_no); - if (cpu_is_not_present(t->cpu_id)) + if (cpu_is_not_allowed(t->cpu_id)) continue; t2 = GET_THREAD(thread_base2, thread_no, core_no, node_no, pkg_no); From 7bb3fe27ad4f62039b2ac80a2147452a608b474f Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Wed, 4 Oct 2023 14:25:25 +0800 Subject: [PATCH 071/299] tools/power/turbostat: Obey allowed CPUs during startup Set turbostat CPU affinity to make sure turbostat is running on one of the allowed CPUs. Set base_cpu to the first allowed CPU so that some platform information is dumped using one of the allowed CPUs. Signed-off-by: Zhang Rui --- tools/power/x86/turbostat/turbostat.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 202cf5231d7a..f81ce832d17a 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -5731,6 +5731,10 @@ void topology_probe() } } + if (!CPU_COUNT_S(cpu_allowed_setsize, cpu_allowed_set)) + err(-ENODEV, "No valid cpus found"); + sched_setaffinity(0, cpu_allowed_setsize, cpu_allowed_set); + /* * Allocate and initialize cpu_affinity_set */ @@ -5941,12 +5945,17 @@ void setup_all_buffers(void) void set_base_cpu(void) { - base_cpu = sched_getcpu(); - if (base_cpu < 0) - err(-ENODEV, "No valid cpus found"); + int i; - if (debug > 1) - fprintf(outf, "base_cpu = %d\n", base_cpu); + for (i = 0; i < topo.max_cpu_num + 1; ++i) { + if (cpu_is_not_allowed(i)) + continue; + base_cpu = i; + if (debug > 1) + fprintf(outf, "base_cpu = %d\n", base_cpu); + return; + } + err(-ENODEV, "No valid cpus found"); } void turbostat_init() @@ -5976,8 +5985,6 @@ int fork_it(char **argv) first_counter_read = 0; if (status) exit(status); - /* clear affinity side-effect of get_counters() */ - sched_setaffinity(0, cpu_present_setsize, cpu_present_set); gettimeofday(&tv_even, (struct timezone *)NULL); child_pid = fork(); From 74318add132365db3026e281ac06836a26cda857 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Wed, 4 Oct 2023 15:13:23 +0800 Subject: [PATCH 072/299] tools/power/turbostat: Abstract several functions When detecting the primary thread/core in a core/package, current code doesn't handle the allowed CPUs. Abstract several functions for further fix of this issue. No functional change. 
Signed-off-by: Zhang Rui --- tools/power/x86/turbostat/turbostat.c | 58 +++++++++++++++++++-------- 1 file changed, 41 insertions(+), 17 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index f81ce832d17a..5f6bc076e1dd 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -1198,6 +1198,30 @@ int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pk return 0; } +int is_cpu_first_thread_in_core(struct thread_data *t, struct core_data *c, struct pkg_data *p) +{ + UNUSED(c); + UNUSED(p); + + return (t->flags & CPU_IS_FIRST_THREAD_IN_CORE); +} + +int is_cpu_first_core_in_package(struct thread_data *t, struct core_data *c, struct pkg_data *p) +{ + UNUSED(c); + UNUSED(p); + + return (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE); +} + +int is_cpu_first_thread_in_package(struct thread_data *t, struct core_data *c, struct pkg_data *p) +{ + UNUSED(c); + UNUSED(p); + + return (t->flags & CPU_IS_FIRST_THREAD_IN_CORE) && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE); +} + int cpu_migrate(int cpu) { CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); @@ -1682,11 +1706,11 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data int printed = 0; /* if showing only 1st thread in core and this isn't one, bail out */ - if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) + if (show_core_only && !is_cpu_first_thread_in_core(t, c, p)) return 0; /* if showing only 1st thread in pkg and this isn't one, bail out */ - if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) + if (show_pkg_only && !is_cpu_first_core_in_package(t, c, p)) return 0; /*if not summary line and --cpu is used */ @@ -1820,7 +1844,7 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->c1 / tsc); /* print per-core data only for 1st thread in core */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) + if (!is_cpu_first_thread_in_core(t, c, p)) goto done; if (DO_BIC(BIC_CPU_c3)) @@ -1867,7 +1891,7 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data outp += sprintf(outp, fmt8, (printed++ ? 
delim : ""), c->core_energy * rapl_energy_units); /* print per-package data only for 1st core in package */ - if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) + if (!is_cpu_first_core_in_package(t, c, p)) goto done; /* PkgTmp */ @@ -2202,7 +2226,7 @@ int delta_cpu(struct thread_data *t, struct core_data *c, int retval = 0; /* calculate core delta only for 1st thread in core */ - if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE) + if (is_cpu_first_thread_in_core(t, c, p)) delta_core(c, c2); /* always calculate thread delta */ @@ -2211,7 +2235,7 @@ int delta_cpu(struct thread_data *t, struct core_data *c, return retval; /* calculate package delta only for 1st core in package */ - if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE) + if (is_cpu_first_core_in_package(t, c, p)) retval = delta_package(p, p2); return retval; @@ -2325,7 +2349,7 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) } /* sum per-core values only for 1st thread in core */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) + if (!is_cpu_first_thread_in_core(t, c, p)) return 0; average.cores.c3 += c->c3; @@ -2345,7 +2369,7 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) } /* sum per-pkg values only for 1st core in pkg */ - if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) + if (!is_cpu_first_core_in_package(t, c, p)) return 0; if (DO_BIC(BIC_Totl_c0)) @@ -2745,7 +2769,7 @@ retry: } /* collect core counters only for 1st thread in core */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) + if (!is_cpu_first_thread_in_core(t, c, p)) goto done; if (DO_BIC(BIC_CPU_c3) || soft_c1_residency_display(BIC_CPU_c3)) { @@ -2800,7 +2824,7 @@ retry: } /* collect package counters only for 1st core in package */ - if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) + if (!is_cpu_first_core_in_package(t, c, p)) goto done; if (DO_BIC(BIC_Totl_c0)) { @@ -4581,7 +4605,7 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p) cpu = t->cpu_id; /* EPB is per-package */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) + if (!is_cpu_first_thread_in_package(t, c, p)) return 0; if (cpu_migrate(cpu)) { @@ -4630,7 +4654,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) cpu = t->cpu_id; /* MSR_HWP_CAPABILITIES is per-package */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) + if (!is_cpu_first_thread_in_package(t, c, p)) return 0; if (cpu_migrate(cpu)) { @@ -4713,7 +4737,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data cpu = t->cpu_id; /* per-package */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) + if (!is_cpu_first_thread_in_package(t, c, p)) return 0; if (cpu_migrate(cpu)) { @@ -4930,7 +4954,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) return 0; /* RAPL counters are per package, so print only for 1st thread/package */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) + if (!is_cpu_first_thread_in_package(t, c, p)) return 0; cpu = t->cpu_id; @@ -5083,7 +5107,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk return 0; /* this is a per-package concept */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) + if (!is_cpu_first_thread_in_package(t, c, p)) return 0; cpu = t->cpu_id; @@ -5152,7 +5176,7 @@ int 
print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p cpu = t->cpu_id; /* DTS is per-core, no need to print for each thread */ - if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) + if (!is_cpu_first_thread_in_core(t, c, p)) return 0; if (cpu_migrate(cpu)) { @@ -5160,7 +5184,7 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p return -1; } - if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) { + if (do_ptm && is_cpu_first_core_in_package(t, c, p)) { if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) return 0; From ccf8a0528061b4ca5f5c0e73c8d888e7e6d8b054 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 6 Oct 2023 09:59:14 +0800 Subject: [PATCH 073/299] tools/power/turbostat: Obey allowed CPUs for primary thread/core detection Thread_id doesn't tell if a CPU is allowed or not. Detect allowed CPUs only and use the first detected thread/core as the primary thread/core of a core/package. Signed-off-by: Zhang Rui --- tools/power/x86/turbostat/turbostat.c | 35 ++++++++++++--------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 5f6bc076e1dd..e586164906fa 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -926,12 +926,11 @@ struct thread_data { unsigned int x2apic_id; unsigned int flags; bool is_atom; -#define CPU_IS_FIRST_THREAD_IN_CORE 0x2 -#define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 unsigned long long counter[MAX_ADDED_THREAD_COUNTERS]; } *thread_even, *thread_odd; struct core_data { + int base_cpu; unsigned long long c3; unsigned long long c6; unsigned long long c7; @@ -944,6 +943,7 @@ struct core_data { } *core_even, *core_odd; struct pkg_data { + int base_cpu; unsigned long long pc2; unsigned long long pc3; unsigned long long pc6; @@ -1200,26 +1200,21 @@ int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pk int is_cpu_first_thread_in_core(struct thread_data *t, struct core_data *c, struct pkg_data *p) { - UNUSED(c); UNUSED(p); - return (t->flags & CPU_IS_FIRST_THREAD_IN_CORE); + return ((int)t->cpu_id == c->base_cpu || c->base_cpu < 0); } int is_cpu_first_core_in_package(struct thread_data *t, struct core_data *c, struct pkg_data *p) { UNUSED(c); - UNUSED(p); - return (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE); + return ((int)t->cpu_id == p->base_cpu || p->base_cpu < 0); } int is_cpu_first_thread_in_package(struct thread_data *t, struct core_data *c, struct pkg_data *p) { - UNUSED(c); - UNUSED(p); - - return (t->flags & CPU_IS_FIRST_THREAD_IN_CORE) && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE); + return is_cpu_first_thread_in_core(t, c, p) && is_cpu_first_core_in_package(t, c, p); } int cpu_migrate(int cpu) @@ -2263,9 +2258,6 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data t->irq_count = 0; t->smi_count = 0; - /* tells format_counters to dump all fields from this set */ - t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE; - c->c3 = 0; c->c6 = 0; c->c7 = 0; @@ -5872,15 +5864,19 @@ void allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_ if (*c == NULL) goto error; - for (i = 0; i < num_cores; i++) + for (i = 0; i < num_cores; i++) { (*c)[i].core_id = -1; + (*c)[i].base_cpu = -1; + } *p = calloc(topo.num_packages, sizeof(struct pkg_data)); if (*p == NULL) goto error; - for (i = 0; i < topo.num_packages; i++) + for (i = 0; i < topo.num_packages; i++) { (*p)[i].package_id = i; + 
(*p)[i].base_cpu = -1; + } return; error: @@ -5913,10 +5909,11 @@ void init_counter(struct thread_data *thread_base, struct core_data *core_base, p = GET_PKG(pkg_base, pkg_id); t->cpu_id = cpu_id; - if (thread_id == 0) { - t->flags |= CPU_IS_FIRST_THREAD_IN_CORE; - if (cpu_is_first_core_in_package(cpu_id)) - t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE; + if (!cpu_is_not_allowed(cpu_id)) { + if (c->base_cpu < 0) + c->base_cpu = t->cpu_id; + if (p->base_cpu < 0) + p->base_cpu = t->cpu_id; } c->core_id = core_id; From 0fe3752901370b83b63d4ddbce708b23c8e41ea9 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 6 Oct 2023 18:35:26 +0800 Subject: [PATCH 074/299] tools/power/turbostat: Obey allowed CPUs for system summary System summary should summarize the information for allowed CPUs instead of all the present CPUs. Introduce topology information for allowed CPUs, and use them to get system summary. Signed-off-by: Zhang Rui --- tools/power/x86/turbostat/turbostat.c | 71 +++++++++++++++++---------- 1 file changed, 46 insertions(+), 25 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index e586164906fa..91aa03989317 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -1134,6 +1134,9 @@ struct topo_params { int num_die; int num_cpus; int num_cores; + int allowed_packages; + int allowed_cpus; + int allowed_cores; int max_cpu_num; int max_node_num; int nodes_per_pkg; @@ -1179,7 +1182,6 @@ int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pk struct thread_data *t; struct core_data *c; struct pkg_data *p; - t = GET_THREAD(thread_base, thread_no, core_no, node_no, pkg_no); if (cpu_is_not_allowed(t->cpu_id)) @@ -2426,40 +2428,40 @@ void compute_average(struct thread_data *t, struct core_data *c, struct pkg_data /* Use the global time delta for the average. 
*/ average.threads.tv_delta = tv_delta; - average.threads.tsc /= topo.num_cpus; - average.threads.aperf /= topo.num_cpus; - average.threads.mperf /= topo.num_cpus; - average.threads.instr_count /= topo.num_cpus; - average.threads.c1 /= topo.num_cpus; + average.threads.tsc /= topo.allowed_cpus; + average.threads.aperf /= topo.allowed_cpus; + average.threads.mperf /= topo.allowed_cpus; + average.threads.instr_count /= topo.allowed_cpus; + average.threads.c1 /= topo.allowed_cpus; if (average.threads.irq_count > 9999999) sums_need_wide_columns = 1; - average.cores.c3 /= topo.num_cores; - average.cores.c6 /= topo.num_cores; - average.cores.c7 /= topo.num_cores; - average.cores.mc6_us /= topo.num_cores; + average.cores.c3 /= topo.allowed_cores; + average.cores.c6 /= topo.allowed_cores; + average.cores.c7 /= topo.allowed_cores; + average.cores.mc6_us /= topo.allowed_cores; if (DO_BIC(BIC_Totl_c0)) - average.packages.pkg_wtd_core_c0 /= topo.num_packages; + average.packages.pkg_wtd_core_c0 /= topo.allowed_packages; if (DO_BIC(BIC_Any_c0)) - average.packages.pkg_any_core_c0 /= topo.num_packages; + average.packages.pkg_any_core_c0 /= topo.allowed_packages; if (DO_BIC(BIC_GFX_c0)) - average.packages.pkg_any_gfxe_c0 /= topo.num_packages; + average.packages.pkg_any_gfxe_c0 /= topo.allowed_packages; if (DO_BIC(BIC_CPUGFX)) - average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages; + average.packages.pkg_both_core_gfxe_c0 /= topo.allowed_packages; - average.packages.pc2 /= topo.num_packages; + average.packages.pc2 /= topo.allowed_packages; if (DO_BIC(BIC_Pkgpc3)) - average.packages.pc3 /= topo.num_packages; + average.packages.pc3 /= topo.allowed_packages; if (DO_BIC(BIC_Pkgpc6)) - average.packages.pc6 /= topo.num_packages; + average.packages.pc6 /= topo.allowed_packages; if (DO_BIC(BIC_Pkgpc7)) - average.packages.pc7 /= topo.num_packages; + average.packages.pc7 /= topo.allowed_packages; - average.packages.pc8 /= topo.num_packages; - average.packages.pc9 /= topo.num_packages; - average.packages.pc10 /= topo.num_packages; + average.packages.pc8 /= topo.allowed_packages; + average.packages.pc9 /= topo.allowed_packages; + average.packages.pc10 /= topo.allowed_packages; for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) @@ -2469,7 +2471,7 @@ void compute_average(struct thread_data *t, struct core_data *c, struct pkg_data sums_need_wide_columns = 1; continue; } - average.threads.counter[i] /= topo.num_cpus; + average.threads.counter[i] /= topo.allowed_cpus; } for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) @@ -2478,7 +2480,7 @@ void compute_average(struct thread_data *t, struct core_data *c, struct pkg_data if (average.cores.counter[i] > 9999999) sums_need_wide_columns = 1; } - average.cores.counter[i] /= topo.num_cores; + average.cores.counter[i] /= topo.allowed_cores; } for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) @@ -2487,7 +2489,7 @@ void compute_average(struct thread_data *t, struct core_data *c, struct pkg_data if (average.packages.counter[i] > 9999999) sums_need_wide_columns = 1; } - average.packages.counter[i] /= topo.num_packages; + average.packages.counter[i] /= topo.allowed_packages; } } @@ -3690,7 +3692,7 @@ void re_initialize(void) { free_all_buffers(); setup_all_buffers(); - fprintf(outf, "turbostat: re-initialized with num_cpus %d\n", topo.num_cpus); + fprintf(outf, "turbostat: re-initialized with num_cpus %d, allowed_cpus %d\n", topo.num_cpus, topo.allowed_cpus); } void set_max_cpu_num(void) @@ 
-5953,6 +5955,24 @@ void allocate_irq_buffers(void) err(-1, "calloc %d", topo.max_cpu_num + 1); } +int update_topo(struct thread_data *t, struct core_data *c, struct pkg_data *p) +{ + topo.allowed_cpus++; + if ((int)t->cpu_id == c->base_cpu) + topo.allowed_cores++; + if ((int)t->cpu_id == p->base_cpu) + topo.allowed_packages++; + + return 0; +} + +void topology_update(void) +{ + topo.allowed_cpus = 0; + topo.allowed_cores = 0; + topo.allowed_packages = 0; + for_all_cpus(update_topo, ODD_COUNTERS); +} void setup_all_buffers(void) { topology_probe(); @@ -5962,6 +5982,7 @@ void setup_all_buffers(void) allocate_counters(&thread_odd, &core_odd, &package_odd); allocate_output_buffer(); for_all_proc_cpus(initialize_counters); + topology_update(); } void set_base_cpu(void) From c25ef0e5d9d7d5fb9e1679286fc7a11e70f16c70 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 20 Oct 2023 14:20:47 +0800 Subject: [PATCH 075/299] tools/power/turbostat: Handle offlined CPUs in cpu_subset It is possible that the cpu_subset contains offlined CPUs. If this happens during start, exit immediately because this is likely an operator error that is best fixed by re-invoking. If this happens at runtime, give a warning only because turbostat should do its best effort to continue running. Signed-off-by: Zhang Rui --- tools/power/x86/turbostat/turbostat.c | 31 ++++++++++++++++++--------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 91aa03989317..f9ea07961a22 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -1149,7 +1149,7 @@ struct timeval tv_even, tv_odd, tv_delta; int *irq_column_2_cpu; /* /proc/interrupts column numbers */ int *irqs_per_cpu; /* indexed by cpu_num */ -void setup_all_buffers(void); +void setup_all_buffers(bool startup); char *sys_lpi_file; char *sys_lpi_file_sysfs = "/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us"; @@ -3691,7 +3691,7 @@ int for_all_proc_cpus(int (func) (int)) void re_initialize(void) { free_all_buffers(); - setup_all_buffers(); + setup_all_buffers(false); fprintf(outf, "turbostat: re-initialized with num_cpus %d, allowed_cpus %d\n", topo.num_cpus, topo.allowed_cpus); } @@ -5692,7 +5692,7 @@ int dir_filter(const struct dirent *dirp) return 0; } -void topology_probe() +void topology_probe(bool startup) { int i; int max_core_id = 0; @@ -5734,7 +5734,12 @@ void topology_probe() CPU_ZERO_S(cpu_allowed_setsize, cpu_allowed_set); /* - * Validate that all cpus in cpu_subset are also in cpu_present_set + * Validate cpu_subset and update cpu_allowed_set. + * + * Make sure all cpus in cpu_subset are also in cpu_present_set during startup, + * and give a warning when cpus in cpu_subset become unavailable at runtime. + * + * cpu_allowed_set is the intersection of cpu_present_set and cpu_subset. 
 */
 	for (i = 0; i < CPU_SUBSET_MAXCPUS; ++i) {
 		if (!cpu_subset) {
@@ -5743,9 +5748,15 @@ void topology_probe(bool startup)
 			continue;
 		}
 		if (CPU_ISSET_S(i, cpu_subset_size, cpu_subset)) {
-			if (!CPU_ISSET_S(i, cpu_present_setsize, cpu_present_set))
-				err(1, "cpu%d not present", i);
-			CPU_SET_S(i, cpu_allowed_setsize, cpu_allowed_set);
+			if (!CPU_ISSET_S(i, cpu_present_setsize, cpu_present_set)) {
+				/* all cpus in cpu_subset must be in cpu_present_set during startup */
+				if (startup)
+					err(1, "cpu%d not present", i);
+				else
+					fprintf(stderr, "cpu%d not present\n", i);
+			} else {
+				CPU_SET_S(i, cpu_allowed_setsize, cpu_allowed_set);
+			}
 		}
 	}
 
@@ -5973,9 +5984,9 @@ void topology_update(void)
 	topo.allowed_packages = 0;
 	for_all_cpus(update_topo, ODD_COUNTERS);
 }
-void setup_all_buffers(void)
+void setup_all_buffers(bool startup)
 {
-	topology_probe();
+	topology_probe(startup);
 	allocate_irq_buffers();
 	allocate_fd_percpu();
 	allocate_counters(&thread_even, &core_even, &package_even);
@@ -6002,7 +6013,7 @@ void set_base_cpu(void)
 
 void turbostat_init()
 {
-	setup_all_buffers();
+	setup_all_buffers(true);
 	set_base_cpu();
 	check_dev_msr();
 	check_permissions();

From 8c3dd2c9e54273922ea71b2a4c0e77fc624c396b Mon Sep 17 00:00:00 2001
From: Zhang Rui
Date: Fri, 20 Oct 2023 09:45:21 +0800
Subject: [PATCH 076/299] tools/power/turbostat: Abstract function for parsing
 cpu string

Abstract parse_cpu_str() which can update any specified cpu_set by a
given cpu string. This can be used to handle further CPU limitations
from other sources like cgroup.

The cpu string parsing code is also enhanced to handle strings that
have an extra '\n' before the string terminator.

Signed-off-by: Zhang Rui
---
 tools/power/x86/turbostat/turbostat.c | 104 ++++++++++++++------------
 1 file changed, 55 insertions(+), 49 deletions(-)

diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index f9ea07961a22..3a759b49f25e 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -3565,6 +3565,59 @@ int get_physical_node_id(struct cpu_topology *thiscpu)
 	return -1;
 }
 
+static int parse_cpu_str(char *cpu_str, cpu_set_t *cpu_set, int cpu_set_size)
+{
+	unsigned int start, end;
+	char *next = cpu_str;
+
+	while (next && *next) {
+
+		if (*next == '-')	/* no negative cpu numbers */
+			return 1;
+
+		start = strtoul(next, &next, 10);
+
+		if (start >= CPU_SUBSET_MAXCPUS)
+			return 1;
+		CPU_SET_S(start, cpu_set_size, cpu_set);
+
+		if (*next == '\0' || *next == '\n')
+			break;
+
+		if (*next == ',') {
+			next += 1;
+			continue;
+		}
+
+		if (*next == '-') {
+			next += 1;	/* start range */
+		} else if (*next == '.') {
+			next += 1;
+			if (*next == '.')
+				next += 1;	/* start range */
+			else
+				return 1;
+		}
+
+		end = strtoul(next, &next, 10);
+		if (end <= start)
+			return 1;
+
+		while (++start <= end) {
+			if (start >= CPU_SUBSET_MAXCPUS)
+				return 1;
+			CPU_SET_S(start, cpu_set_size, cpu_set);
+		}
+
+		if (*next == ',')
+			next += 1;
+		else if (*next != '\0' && *next != '\n')
+			return 1;
+	}
+
+	return 0;
+}
+
 int get_thread_siblings(struct cpu_topology *thiscpu)
 {
 	char path[80], character;
@@ -6384,9 +6437,6 @@ void probe_sysfs(void)
  */
 void parse_cpu_command(char *optarg)
 {
-	unsigned int start, end;
-	char *next;
-
 	if (!strcmp(optarg, "core")) {
 		if (cpu_subset)
 			goto error;
@@ -6409,52 +6459,8 @@ void parse_cpu_command(char *optarg)
 
 	CPU_ZERO_S(cpu_subset_size, cpu_subset);
 
-	next = optarg;
-
-	while (next && *next) {
-
-		if (*next == '-')	/* no negative cpu numbers */
-			goto error;
-
-		start = strtoul(next,
&next, 10); - - if (start >= CPU_SUBSET_MAXCPUS) - goto error; - CPU_SET_S(start, cpu_subset_size, cpu_subset); - - if (*next == '\0') - break; - - if (*next == ',') { - next += 1; - continue; - } - - if (*next == '-') { - next += 1; /* start range */ - } else if (*next == '.') { - next += 1; - if (*next == '.') - next += 1; /* start range */ - else - goto error; - } - - end = strtoul(next, &next, 10); - if (end <= start) - goto error; - - while (++start <= end) { - if (start >= CPU_SUBSET_MAXCPUS) - goto error; - CPU_SET_S(start, cpu_subset_size, cpu_subset); - } - - if (*next == ',') - next += 1; - else if (*next != '\0') - goto error; - } + if (parse_cpu_str(optarg, cpu_subset, cpu_subset_size)) + goto error; return; From f638858da0925b29122f05135663013dc240eaf9 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Fri, 20 Oct 2023 09:39:22 +0800 Subject: [PATCH 077/299] tools/power/turbostat: Handle cgroup v2 cpu limitation CPUs can be isolated via cgroup settings and turbostat should avoid migrating to these CPUs, just like it does for the '-c' cpus. Introduce cpu_effective_set to save the cgroup cpu limitation info from /sys/fs/cgroup/cpuset.cpus.effective. And use cpu_allowed_set as the intersection of cpu_present_set, cpu_effective_set and cpu_subset. Signed-off-by: Zhang Rui --- tools/power/x86/turbostat/turbostat.c | 95 ++++++++++++++++++++++----- 1 file changed, 80 insertions(+), 15 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 3a759b49f25e..0ef6fba118b1 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -904,8 +904,8 @@ int backwards_count; char *progname; #define CPU_SUBSET_MAXCPUS 1024 /* need to use before probe... */ -cpu_set_t *cpu_present_set, *cpu_allowed_set, *cpu_affinity_set, *cpu_subset; -size_t cpu_present_setsize, cpu_allowed_setsize, cpu_affinity_setsize, cpu_subset_size; +cpu_set_t *cpu_present_set, *cpu_effective_set, *cpu_allowed_set, *cpu_affinity_set, *cpu_subset; +size_t cpu_present_setsize, cpu_effective_setsize, cpu_allowed_setsize, cpu_affinity_setsize, cpu_subset_size; #define MAX_ADDED_COUNTERS 8 #define MAX_ADDED_THREAD_COUNTERS 24 #define BITMASK_SIZE 32 @@ -3419,6 +3419,10 @@ void free_all_buffers(void) cpu_present_set = NULL; cpu_present_setsize = 0; + CPU_FREE(cpu_effective_set); + cpu_effective_set = NULL; + cpu_effective_setsize = 0; + CPU_FREE(cpu_allowed_set); cpu_allowed_set = NULL; cpu_allowed_setsize = 0; @@ -3741,6 +3745,46 @@ int for_all_proc_cpus(int (func) (int)) return 0; } +#define PATH_EFFECTIVE_CPUS "/sys/fs/cgroup/cpuset.cpus.effective" + +static char cpu_effective_str[1024]; + +static int update_effective_str(bool startup) +{ + FILE *fp; + char *pos; + char buf[1024]; + int ret; + + if (cpu_effective_str[0] == '\0' && !startup) + return 0; + + fp = fopen(PATH_EFFECTIVE_CPUS, "r"); + if (!fp) + return 0; + + pos = fgets(buf, 1024, fp); + if (!pos) + err(1, "%s: file read failed\n", PATH_EFFECTIVE_CPUS); + + fclose(fp); + + ret = strncmp(cpu_effective_str, buf, 1024); + if (!ret) + return 0; + + strncpy(cpu_effective_str, buf, 1024); + return 1; +} + +static void update_effective_set(bool startup) +{ + update_effective_str(startup); + + if (parse_cpu_str(cpu_effective_str, cpu_effective_set, cpu_effective_setsize)) + err(1, "%s: cpu str malformat %s\n", PATH_EFFECTIVE_CPUS, cpu_effective_str); +} + void re_initialize(void) { free_all_buffers(); @@ -4257,6 +4301,10 @@ restart: re_initialize(); goto restart; } + if 
(update_effective_str(false)) { + re_initialize(); + goto restart; + } do_sleep(); if (snapshot_proc_sysfs_files()) goto restart; @@ -5777,6 +5825,16 @@ void topology_probe(bool startup) CPU_ZERO_S(cpu_present_setsize, cpu_present_set); for_all_proc_cpus(mark_cpu_present); + /* + * Allocate and initialize cpu_effective_set + */ + cpu_effective_set = CPU_ALLOC((topo.max_cpu_num + 1)); + if (cpu_effective_set == NULL) + err(3, "CPU_ALLOC"); + cpu_effective_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); + CPU_ZERO_S(cpu_effective_setsize, cpu_effective_set); + update_effective_set(startup); + /* * Allocate and initialize cpu_allowed_set */ @@ -5787,30 +5845,37 @@ void topology_probe(bool startup) CPU_ZERO_S(cpu_allowed_setsize, cpu_allowed_set); /* - * Validate cpu_subset and update cpu_allowed_set. + * Validate and update cpu_allowed_set. * - * Make sure all cpus in cpu_subset are also in cpu_present_set during startup, - * and give a warning when cpus in cpu_subset become unavailable at runtime. + * Make sure all cpus in cpu_subset are also in cpu_present_set during startup. + * Give a warning when cpus in cpu_subset become unavailable at runtime. + * Give a warning when cpus are not effective because of cgroup setting. * - * cpu_allowed_set is the intersection of cpu_present_set and cpu_subset. + * cpu_allowed_set is the intersection of cpu_present_set/cpu_effective_set/cpu_subset. */ for (i = 0; i < CPU_SUBSET_MAXCPUS; ++i) { - if (!cpu_subset) { - if (CPU_ISSET_S(i, cpu_present_setsize, cpu_present_set)) - CPU_SET_S(i, cpu_allowed_setsize, cpu_allowed_set); + if (cpu_subset && !CPU_ISSET_S(i, cpu_subset_size, cpu_subset)) continue; - } - if (CPU_ISSET_S(i, cpu_subset_size, cpu_subset)) { - if (!CPU_ISSET_S(i, cpu_present_setsize, cpu_present_set)) { - /* all cpus in cpu_subset must be in cpu_present_set during startup */ + + if (!CPU_ISSET_S(i, cpu_present_setsize, cpu_present_set)) { + if (cpu_subset) { + /* cpus in cpu_subset must be in cpu_present_set during startup */ if (startup) err(1, "cpu%d not present", i); else fprintf(stderr, "cpu%d not present\n", i); - } else { - CPU_SET_S(i, cpu_allowed_setsize, cpu_allowed_set); + } + continue; + } + + if (CPU_COUNT_S(cpu_effective_setsize, cpu_effective_set)) { + if (!CPU_ISSET_S(i, cpu_effective_setsize, cpu_effective_set)) { + fprintf(stderr, "cpu%d not effective\n", i); + continue; } } + + CPU_SET_S(i, cpu_allowed_setsize, cpu_allowed_set); } if (!CPU_COUNT_S(cpu_allowed_setsize, cpu_allowed_set)) From 37f68a2940558b4f6f8e51b7b1d00f084b4bdde2 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 24 Jan 2023 10:39:53 -0800 Subject: [PATCH 078/299] tools/power/turbostat: Move process to root cgroup When available CPUs are reduced via cgroup cpuset controller, turbostat will exit with errors (For example): get_counters: Could not migrate to CPU 0 turbostat: re-initialized with num_cpus 20 get_counters: Could not migrate to CPU 0 turbostat: re-initialized with num_cpus 20 Move the turbostat to root cgroup, which has every CPU. Writing the value 0 to a cgroup.procs file causes the writing process to be moved to the corresponding cgroup. 
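For illustration only, not part of the patch below: a minimal standalone sketch of the same operation, assuming cgroup v2 is mounted at /sys/fs/cgroup. It mirrors the open/write/close sequence that the hunk adds to main():

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/fs/cgroup/cgroup.procs", O_WRONLY);

	if (fd < 0) {
		/* e.g. cgroup v1 only, or cgroup2 not mounted here */
		perror("open /sys/fs/cgroup/cgroup.procs");
		return 1;
	}

	/* "0" means "the process doing the write", i.e. ourselves */
	if (write(fd, "0\n", 2) < 0)
		perror("write cgroup.procs");

	close(fd);
	return 0;
}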
Signed-off-by: Srinivas Pandruvada Tested-by: Zhang Rui --- tools/power/x86/turbostat/turbostat.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 0ef6fba118b1..fea63d9d8e02 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -6666,6 +6666,19 @@ void cmdline(int argc, char **argv) int main(int argc, char **argv) { + int fd, ret; + + fd = open("/sys/fs/cgroup/cgroup.procs", O_WRONLY); + if (fd < 0) + goto skip_cgroup_setting; + + ret = write(fd, "0\n", 2); + if (ret == -1) + perror("Can't update cgroup\n"); + + close(fd); + +skip_cgroup_setting: outf = stderr; cmdline(argc, argv); From 0e3f10e6aa97b0134b526ec9cdc3ccdac2239b43 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 19 Oct 2023 11:04:33 +0800 Subject: [PATCH 079/299] tools/power/turbostat: Add MSR_CORE_C1_RES support for spr_features Add MSR_CORE_C1_RES support for spr_features because both Sapphirerapids and Emeraldrapids support this MSR. Signed-off-by: Zhang Rui --- tools/power/x86/turbostat/turbostat.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index fea63d9d8e02..bbeeec02bf5b 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -667,6 +667,7 @@ static const struct platform_features spr_features = { .bclk_freq = BCLK_100MHZ, .supported_cstates = CC1 | CC6 | PC2 | PC6, .cst_limit = CST_LIMIT_SKX, + .has_msr_core_c1_res = 1, .has_irtl_msrs = 1, .has_cst_prewake_bit = 1, .trl_msrs = TRL_BASE | TRL_CORECOUNT, From 5feab4a6b8a730438a0fe8758dfa0700f951edde Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sat, 9 Sep 2023 13:28:07 +0800 Subject: [PATCH 080/299] tools/power/turbostat: Add initial support for GraniteRapids Add initial support for GraniteRapids. It shares the same features with SapphireRapids. Signed-off-by: Zhang Rui --- tools/power/x86/turbostat/turbostat.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index bbeeec02bf5b..981d39454b49 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -829,6 +829,7 @@ static const struct platform_data turbostat_pdata[] = { { INTEL_FAM6_TIGERLAKE, &cnl_features }, { INTEL_FAM6_SAPPHIRERAPIDS_X, &spr_features }, { INTEL_FAM6_EMERALDRAPIDS_X, &spr_features }, + { INTEL_FAM6_GRANITERAPIDS_X, &spr_features }, { INTEL_FAM6_LAKEFIELD, &cnl_features }, { INTEL_FAM6_ALDERLAKE, &adl_features }, { INTEL_FAM6_ALDERLAKE_L, &adl_features }, From d33605f367414c7e0009978d1fbe9af01a36e221 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 28 Sep 2023 13:09:02 +0800 Subject: [PATCH 081/299] tools/power/turbostat: Add initial support for SierraForest Add initial support for SierraForest. It shares the same features with SapphireRapids, except that it has MSR_MODULE_C6_RES_MS support. 
Signed-off-by: Zhang Rui --- tools/power/x86/turbostat/turbostat.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 981d39454b49..a50b6d071f6a 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -674,6 +674,22 @@ static const struct platform_features spr_features = { .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, }; +static const struct platform_features srf_features = { + .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, + .has_config_tdp = 1, + .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC6 | PC2 | PC6, + .cst_limit = CST_LIMIT_SKX, + .has_msr_core_c1_res = 1, + .has_msr_module_c6_res_ms = 1, + .has_irtl_msrs = 1, + .has_cst_prewake_bit = 1, + .trl_msrs = TRL_BASE | TRL_CORECOUNT, + .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, +}; + static const struct platform_features slv_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_SLV, @@ -848,6 +864,7 @@ static const struct platform_data turbostat_pdata[] = { { INTEL_FAM6_ATOM_TREMONT, &tmt_features }, { INTEL_FAM6_ATOM_TREMONT_L, &tmt_features }, { INTEL_FAM6_ATOM_GRACEMONT, &adl_features }, + { INTEL_FAM6_ATOM_CRESTMONT_X, &srf_features }, { INTEL_FAM6_XEON_PHI_KNL, &knl_features }, { INTEL_FAM6_XEON_PHI_KNM, &knl_features }, /* From 5a6efcb9102af4210d5a59182dbfbc594ae50fd4 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 19 Oct 2023 11:00:07 +0800 Subject: [PATCH 082/299] tools/power/turbostat: Add initial support for GrandRidge Add initial support for GrandRidge. It shares the same features as SierraForest, except that it does not support PC2/PC6. Signed-off-by: Zhang Rui --- tools/power/x86/turbostat/turbostat.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index a50b6d071f6a..3407f08593f8 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -690,6 +690,22 @@ static const struct platform_features srf_features = { .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, }; +static const struct platform_features grr_features = { + .has_msr_misc_feature_control = 1, + .has_msr_misc_pwr_mgmt = 1, + .has_nhm_msrs = 1, + .has_config_tdp = 1, + .bclk_freq = BCLK_100MHZ, + .supported_cstates = CC1 | CC6, + .cst_limit = CST_LIMIT_SKX, + .has_msr_core_c1_res = 1, + .has_msr_module_c6_res_ms = 1, + .has_irtl_msrs = 1, + .has_cst_prewake_bit = 1, + .trl_msrs = TRL_BASE | TRL_CORECOUNT, + .rapl_msrs = RAPL_PKG_ALL | RAPL_DRAM_ALL, +}; + static const struct platform_features slv_features = { .has_nhm_msrs = 1, .bclk_freq = BCLK_SLV, @@ -865,6 +881,7 @@ static const struct platform_data turbostat_pdata[] = { { INTEL_FAM6_ATOM_TREMONT_L, &tmt_features }, { INTEL_FAM6_ATOM_GRACEMONT, &adl_features }, { INTEL_FAM6_ATOM_CRESTMONT_X, &srf_features }, + { INTEL_FAM6_ATOM_CRESTMONT, &grr_features }, { INTEL_FAM6_XEON_PHI_KNL, &knl_features }, { INTEL_FAM6_XEON_PHI_KNM, &knl_features }, /* From 7b57e7b683e3872b02117f46bd7dc7ad765888a8 Mon Sep 17 00:00:00 2001 From: Sumeet Pawnikar Date: Tue, 23 May 2023 17:46:45 +0530 Subject: [PATCH 083/299] tools/power/turbostat: Add initial support for ArrowLake Add initial support for ArrowLake platform. It shares the same features with CannonLake. 
Signed-off-by: Sumeet Pawnikar --- tools/power/x86/turbostat/turbostat.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 3407f08593f8..6c52cceae1d7 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -870,6 +870,7 @@ static const struct platform_data turbostat_pdata[] = { { INTEL_FAM6_RAPTORLAKE_S, &adl_features }, { INTEL_FAM6_METEORLAKE, &cnl_features }, { INTEL_FAM6_METEORLAKE_L, &cnl_features }, + { INTEL_FAM6_ARROWLAKE, &cnl_features }, { INTEL_FAM6_ATOM_SILVERMONT, &slv_features }, { INTEL_FAM6_ATOM_SILVERMONT_D, &slvd_features }, { INTEL_FAM6_ATOM_AIRMONT, &amt_features }, From 956dbd3de400a5665faf08a8588556db9c1bb56e Mon Sep 17 00:00:00 2001 From: Sumeet Pawnikar Date: Tue, 23 May 2023 17:46:45 +0530 Subject: [PATCH 084/299] tools/power/turbostat: Add initial support for LunarLake Add initial support for LunarLake platform. It shares the same features with CannonLake. Signed-off-by: Sumeet Pawnikar --- tools/power/x86/turbostat/turbostat.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 6c52cceae1d7..8a311d7272e7 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -871,6 +871,7 @@ static const struct platform_data turbostat_pdata[] = { { INTEL_FAM6_METEORLAKE, &cnl_features }, { INTEL_FAM6_METEORLAKE_L, &cnl_features }, { INTEL_FAM6_ARROWLAKE, &cnl_features }, + { INTEL_FAM6_LUNARLAKE_M, &cnl_features }, { INTEL_FAM6_ATOM_SILVERMONT, &slv_features }, { INTEL_FAM6_ATOM_SILVERMONT_D, &slvd_features }, { INTEL_FAM6_ATOM_AIRMONT, &amt_features }, From c692800cb2ef7a4f4940c68d765cd4649aff3e46 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 2 Nov 2023 02:33:14 +0300 Subject: [PATCH 085/299] MAINTAINERS: Add Intel TDX entry Add myself as Intel TDX maintainer. I drove upstreaming most of TDX code so far and I will continue working on TDX for foreseeable future. [ dhansen: * Add myself as a reviewer too * Swap Maintained=>Supported. I double checked Kirill is still being paid * Add drivers/virt/coco/tdx-guest ] Suggested-by: Dave Hansen Signed-off-by: Kirill A. Shutemov Signed-off-by: Dave Hansen Acked-by: Dave Hansen Acked-by: Kuppuswamy Sathyanarayanan Acked-by: Kai Huang Link: https://lore.kernel.org/all/20231101233314.2567-1-kirill.shutemov%40linux.intel.com --- MAINTAINERS | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index dd5de540ec0b..b697020eca1c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23460,6 +23460,20 @@ F: arch/x86/kernel/dumpstack.c F: arch/x86/kernel/stacktrace.c F: arch/x86/kernel/unwind_*.c +X86 TRUST DOMAIN EXTENSIONS (TDX) +M: Kirill A. 
Shutemov +R: Dave Hansen +L: x86@kernel.org +L: linux-coco@lists.linux.dev +S: Supported +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/tdx +F: arch/x86/boot/compressed/tdx* +F: arch/x86/coco/tdx/ +F: arch/x86/include/asm/shared/tdx.h +F: arch/x86/include/asm/tdx.h +F: arch/x86/virt/vmx/tdx/ +F: drivers/virt/coco/tdx-guest + X86 VDSO M: Andy Lutomirski L: linux-kernel@vger.kernel.org From d3badb15613c14dd35d3495b1dde5c90fcd616dd Mon Sep 17 00:00:00 2001 From: Fang Xiang Date: Mon, 30 Oct 2023 16:32:56 +0800 Subject: [PATCH 086/299] irqchip/gic-v3-its: Flush ITS tables correctly in non-coherent GIC designs In non-coherent GIC designs, the ITS tables must be flushed before writing to the GITS_BASER registers, otherwise the ITS could read dirty tables, which results in unpredictable behavior. Flush the tables right at the begin of its_setup_baser() to prevent that. [ tglx: Massage changelog ] Fixes: a8707f553884 ("irqchip/gic-v3: Add Rockchip 3588001 erratum workaround") Suggested-by: Marc Zyngier Signed-off-by: Fang Xiang Signed-off-by: Thomas Gleixner Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20231030083256.4345-1-fangxiang3@xiaomi.com --- drivers/irqchip/irq-gic-v3-its.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index a8c89df1a997..9a7a74239eab 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -2379,12 +2379,12 @@ retry_baser: break; } + if (!shr) + gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); + its_write_baser(its, baser, val); tmp = baser->val; - if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) - tmp &= ~GITS_BASER_SHAREABILITY_MASK; - if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { /* * Shareability didn't stick. Just use @@ -2394,10 +2394,9 @@ retry_baser: * non-cacheable as well. */ shr = tmp & GITS_BASER_SHAREABILITY_MASK; - if (!shr) { + if (!shr) cache = GITS_BASER_nC; - gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); - } + goto retry_baser; } @@ -2609,6 +2608,11 @@ static int its_alloc_tables(struct its_node *its) /* erratum 24313: ignore memory access type */ cache = GITS_BASER_nCnB; + if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) { + cache = GITS_BASER_nC; + shr = 0; + } + for (i = 0; i < GITS_BASER_NR_REGS; i++) { struct its_baser *baser = its->tables + i; u64 val = its_read_baser(its, baser); From 18216762bcf618c52b85719d3563243f80e4a2d4 Mon Sep 17 00:00:00 2001 From: Bagas Sanjaya Date: Mon, 6 Nov 2023 17:12:04 +0700 Subject: [PATCH 087/299] x86/Documentation: Indent 'note::' directive for protocol version number note The protocol version number note is between the protocol version table and the memory layout section. As such, Sphinx renders the note directive not only on the actual note, but until the end of doc. Indent the directive so that only the actual protocol version number note is rendered as such. 
Fixes: 2c33c27fd603 ("x86/boot: Introduce kernel_info")
Signed-off-by: Bagas Sanjaya
Signed-off-by: Ingo Molnar
Cc: Jonathan Corbet
Link: https://lore.kernel.org/r/20231106101206.76487-2-bagasdotme@gmail.com
Signed-off-by: Ingo Molnar
---
 Documentation/arch/x86/boot.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Documentation/arch/x86/boot.rst b/Documentation/arch/x86/boot.rst
index f5d2f2414de8..22cc7a040dae 100644
--- a/Documentation/arch/x86/boot.rst
+++ b/Documentation/arch/x86/boot.rst
@@ -77,7 +77,7 @@ Protocol 2.14	BURNT BY INCORRECT COMMIT
 Protocol 2.15	(Kernel 5.5) Added the kernel_info and kernel_info.setup_type_max.
 =============	============================================================
 
-.. note::
+  .. note::
 	The protocol version number should be changed only if the setup header
 	is changed. There is no need to update the version number if boot_params
 	or kernel_info are changed. Additionally, it is recommended to use

From f2c1dba31133233697fc96e808c6005fc304a8e9 Mon Sep 17 00:00:00 2001
From: Len Brown
Date: Wed, 28 Jun 2023 09:40:53 -0400
Subject: [PATCH 088/299] tools/power/turbostat: bugfix "--show IPC"

turbostat --show IPC displays "inf" for the IPC column

turbostat was missing the explicit dependency of IPC on APERF, and thus
neglected to collect APERF when only IPC was requested.

typical use: turbostat --quiet --show CPU,IPC

Signed-off-by: Len Brown
---
 tools/power/x86/turbostat/turbostat.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 8a311d7272e7..5aa6598a40a9 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -2202,7 +2202,8 @@ int delta_thread(struct thread_data *new, struct thread_data *old, struct core_d
 
 	old->c1 = new->c1 - old->c1;
 
-	if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || soft_c1_residency_display(BIC_Avg_MHz)) {
+	if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || DO_BIC(BIC_IPC)
+	    || soft_c1_residency_display(BIC_Avg_MHz)) {
 		if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
 			old->aperf = new->aperf - old->aperf;
 			old->mperf = new->mperf - old->mperf;
@@ -2724,7 +2725,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 retry:
 	t->tsc = rdtsc();	/* we are running on local CPU of interest */
 
-	if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || soft_c1_residency_display(BIC_Avg_MHz)) {
+	if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || DO_BIC(BIC_IPC)
+	    || soft_c1_residency_display(BIC_Avg_MHz)) {
 		unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
 
 		/*

From b8337e6a780dad9505f9d44da07c0a5c52fa0a04 Mon Sep 17 00:00:00 2001
From: Len Brown
Date: Tue, 7 Nov 2023 23:28:30 -0500
Subject: [PATCH 089/299] tools/power turbostat: version 2023.11.07

Turbostat features are now table-driven (Rui Zhang)
Add support for some new platforms (Sumeet Pawnikar, Rui Zhang)
Gracefully run in configs when CPUs are limited (Rui Zhang, Srinivas Pandruvada)
misc minor fixes.
Signed-off-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 5aa6598a40a9..7a334377f92b 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -6259,7 +6259,7 @@ int get_and_dump_counters(void) void print_version() { - fprintf(outf, "turbostat version 2023.03.17 - Len Brown \n"); + fprintf(outf, "turbostat version 2023.11.07 - Len Brown \n"); } #define COMMAND_LINE_SIZE 2048 From 31255e072b2e91f97645d792d25b2db744186dd1 Mon Sep 17 00:00:00 2001 From: Rick Edgecombe Date: Tue, 7 Nov 2023 10:22:51 -0800 Subject: [PATCH 090/299] x86/shstk: Delay signal entry SSP write until after user accesses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a signal is being delivered, the kernel needs to make accesses to userspace. These accesses could encounter an access error, in which case the signal delivery itself will trigger a segfault. Usually this would result in the kernel killing the process. But in the case of a SEGV signal handler being configured, the failure of the first signal delivery will result in *another* signal getting delivered. The second signal may succeed if another thread has resolved the issue that triggered the segfault (i.e. a well timed mprotect()/mmap()), or the second signal is being delivered to another stack (i.e. an alt stack). On x86, in the non-shadow stack case, all the accesses to userspace are done before changes to the registers (in pt_regs). The operation is aborted when an access error occurs, so although there may be writes done for the first signal, control flow changes for the signal (regs->ip, regs->sp, etc) are not committed until all the accesses have already completed successfully. This means that the second signal will be delivered as if it happened at the time of the first signal. It will effectively replace the first aborted signal, overwriting the half-written frame of the aborted signal. So on sigreturn from the second signal, control flow will resume happily from the point of control flow where the original signal was delivered. The problem is, when shadow stack is active, the shadow stack SSP register/MSR is updated *before* some of the userspace accesses. This means if the earlier accesses succeed and the later ones fail, the second signal will not be delivered at the same spot on the shadow stack as the first one. So on sigreturn from the second signal, the SSP will be pointing to the wrong location on the shadow stack (off by a frame). Pengfei privately reported that while using a shadow stack enabled glibc, the “signal06” test in the LTP test-suite hung. It turns out it is testing the above described double signal scenario. When this test was compiled with shadow stack, the first signal pushed a shadow stack sigframe, then the second pushed another. When the second signal was handled, the SSP was at the first shadow stack signal frame instead of the original location. The test then got stuck as the #CP from the twice incremented SSP was incorrect and generated segfaults in a loop. Fix this by adjusting the SSP register only after any userspace accesses, such that there can be no failures after the SSP is adjusted. Do this by moving the shadow stack sigframe push logic to happen after all other userspace accesses. 
Note, sigreturn (as opposed to the signal delivery dealt with in this patch) has ordering behavior that could lead to similar failures. The ordering issues there extend beyond shadow stack to include the alt stack restoration. Fixing that would require cross-arch changes, and the ordering today does not cause any known test or apps breakages. So leave it as is, for now. [ dhansen: minor changelog/subject tweak ] Fixes: 05e36022c054 ("x86/shstk: Handle signals for shadow stack") Reported-by: Pengfei Xu Signed-off-by: Rick Edgecombe Signed-off-by: Dave Hansen Tested-by: Pengfei Xu Cc:stable@vger.kernel.org Link: https://lore.kernel.org/all/20231107182251.91276-1-rick.p.edgecombe%40intel.com Link: https://github.com/linux-test-project/ltp/blob/master/testcases/kernel/syscalls/signal/signal06.c --- arch/x86/kernel/signal_64.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index cacf2ede6217..23d8aaf8d9fd 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c @@ -175,9 +175,6 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe), &fp); uc_flags = frame_uc_flags(regs); - if (setup_signal_shadow_stack(ksig)) - return -EFAULT; - if (!user_access_begin(frame, sizeof(*frame))) return -EFAULT; @@ -198,6 +195,9 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) return -EFAULT; } + if (setup_signal_shadow_stack(ksig)) + return -EFAULT; + /* Set up registers for signal handler */ regs->di = ksig->sig; /* In case the signal handler was declared without prototypes */ From 19597cad64d608aa8ac2f8aef50a50187a565223 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Mon, 30 Oct 2023 12:19:12 +0530 Subject: [PATCH 091/299] scsi: qla2xxx: Fix system crash due to bad pointer access User experiences system crash when running AER error injection. The perturbation causes the abort-all-I/O path to trigger. The driver assumes all I/O on this path is FCP only. If there is both NVMe & FCP traffic, a system crash happens. Add additional check to see if I/O is FCP or not before access. 
PID: 999019 TASK: ff35d769f24722c0 CPU: 53 COMMAND: "kworker/53:1" 0 [ff3f78b964847b58] machine_kexec at ffffffffae86973d 1 [ff3f78b964847ba8] __crash_kexec at ffffffffae9be29d 2 [ff3f78b964847c70] crash_kexec at ffffffffae9bf528 3 [ff3f78b964847c78] oops_end at ffffffffae8282ab 4 [ff3f78b964847c98] exc_page_fault at ffffffffaf2da502 5 [ff3f78b964847cc0] asm_exc_page_fault at ffffffffaf400b62 [exception RIP: qla2x00_abort_srb+444] RIP: ffffffffc07b5f8c RSP: ff3f78b964847d78 RFLAGS: 00010046 RAX: 0000000000000282 RBX: ff35d74a0195a200 RCX: ff35d76886fd03a0 RDX: 0000000000000001 RSI: ffffffffc07c5ec8 RDI: ff35d74a0195a200 RBP: ff35d76913d22080 R8: ff35d7694d103200 R9: ff35d7694d103200 R10: 0000000100000000 R11: ffffffffb05d6630 R12: 0000000000010000 R13: ff3f78b964847df8 R14: ff35d768d8754000 R15: ff35d768877248e0 ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018 6 [ff3f78b964847d70] qla2x00_abort_srb at ffffffffc07b5f84 [qla2xxx] 7 [ff3f78b964847de0] __qla2x00_abort_all_cmds at ffffffffc07b6238 [qla2xxx] 8 [ff3f78b964847e38] qla2x00_abort_all_cmds at ffffffffc07ba635 [qla2xxx] 9 [ff3f78b964847e58] qla2x00_terminate_rport_io at ffffffffc08145eb [qla2xxx] 10 [ff3f78b964847e70] fc_terminate_rport_io at ffffffffc045987e [scsi_transport_fc] 11 [ff3f78b964847e88] process_one_work at ffffffffae914f15 12 [ff3f78b964847ed0] worker_thread at ffffffffae9154c0 13 [ff3f78b964847f10] kthread at ffffffffae91c456 14 [ff3f78b964847f50] ret_from_fork at ffffffffae8036ef Cc: stable@vger.kernel.org Fixes: f45bca8c5052 ("scsi: qla2xxx: Fix double scsi_done for abort path") Signed-off-by: Quinn Tran Signed-off-by: Nilesh Javali Link: https://lore.kernel.org/r/20231030064912.37912-1-njavali@marvell.com Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_os.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 7e103d711825..d24410944f7d 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1837,8 +1837,16 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res, } spin_lock_irqsave(qp->qp_lock_ptr, *flags); - if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd))) - sp->done(sp, res); + switch (sp->type) { + case SRB_SCSI_CMD: + if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd))) + sp->done(sp, res); + break; + default: + if (ret_cmd) + sp->done(sp, res); + break; + } } else { sp->done(sp, res); } From defde5a50d91c74e1ce71a7f0bce7fb1ae311d84 Mon Sep 17 00:00:00 2001 From: Naomi Chu Date: Thu, 2 Nov 2023 13:24:24 +0800 Subject: [PATCH 092/299] scsi: ufs: core: Expand MCQ queue slot to DeviceQueueDepth + 1 The UFSHCI 4.0 specification mandates that there should always be at least one empty slot in each queue for distinguishing between full and empty states. Enlarge 'hwq->max_entries' to 'DeviceQueueDepth + 1' to allow UFSHCI 4.0 controllers to fully utilize MCQ queue slots. Fixes: 4682abfae2eb ("scsi: ufs: core: mcq: Allocate memory for MCQ mode") Signed-off-by: Naomi Chu Link: https://lore.kernel.org/r/20231102052426.12006-2-naomi.chu@mediatek.com Reviewed-by: Stanley Chu Reviewed-by: Bart Van Assche Reviewed-by: Peter Wang Reviewed-by: Chun-Hung Signed-off-by: Martin K. 
Petersen
---
 drivers/ufs/core/ufs-mcq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 2ba8ec254dce..5c75ab9d6bb5 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -436,7 +436,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
 
 	for (i = 0; i < hba->nr_hw_queues; i++) {
 		hwq = &hba->uhq[i];
-		hwq->max_entries = hba->nutrs;
+		hwq->max_entries = hba->nutrs + 1;
 		spin_lock_init(&hwq->sq_lock);
 		spin_lock_init(&hwq->cq_lock);
 		mutex_init(&hwq->sq_mutex);

From 27900d7119c464b43cd9eac69c85884d17bae240 Mon Sep 17 00:00:00 2001
From: Peter Wang
Date: Mon, 6 Nov 2023 15:51:17 +0800
Subject: [PATCH 093/299] scsi: ufs: core: Fix racing issue between
 ufshcd_mcq_abort() and ISR

If command timeout happens and cq complete IRQ is raised at the same
time, ufshcd_mcq_abort() clears lrbp->cmd and a NULL pointer deref
happens in the ISR.

Error log:

ufshcd_abort: Device abort task at tag 18
Unable to handle kernel NULL pointer dereference at virtual address
0000000000000108
pc : [0xffffffe27ef867ac] scsi_dma_unmap+0xc/0x44
lr : [0xffffffe27f1b898c] ufshcd_release_scsi_cmd+0x24/0x114

Fixes: f1304d442077 ("scsi: ufs: mcq: Added ufshcd_mcq_abort()")
Cc: stable@vger.kernel.org
Signed-off-by: Peter Wang
Link: https://lore.kernel.org/r/20231106075117.8995-1-peter.wang@mediatek.com
Reviewed-by: Bart Van Assche
Signed-off-by: Martin K. Petersen
---
 drivers/ufs/core/ufs-mcq.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 5c75ab9d6bb5..0787456c2b89 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -630,6 +630,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
 	int tag = scsi_cmd_to_rq(cmd)->tag;
 	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
 	struct ufs_hw_queue *hwq;
+	unsigned long flags;
 	int err = FAILED;
 
 	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
@@ -670,8 +671,10 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
 	}
 
 	err = SUCCESS;
+	spin_lock_irqsave(&hwq->cq_lock, flags);
 	if (ufshcd_cmd_inflight(lrbp->cmd))
 		ufshcd_release_scsi_cmd(hba, lrbp);
+	spin_unlock_irqrestore(&hwq->cq_lock, flags);
 
 out:
 	return err;

From 860c3d03bbc3f17aef8600662c488f27fd093142 Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Mon, 6 Nov 2023 17:04:33 +0300
Subject: [PATCH 094/299] scsi: scsi_debug: Fix some bugs in
 sdebug_error_write()

There are two bugs in this code:

1) If count is zero, then it will lead to a NULL dereference. The
kmalloc() will successfully allocate zero bytes and the test for
"if (buf[0] == '-')" will read beyond the end of the zero size buffer
and Oops.

2) The code does not ensure that the user's string is properly NUL
terminated which could lead to a read overflow.

Fixes: a9996d722b11 ("scsi: scsi_debug: Add interface to manage error injection for a single device")
Signed-off-by: Dan Carpenter
Link: https://lore.kernel.org/r/7733643d-e102-4581-8d29-769472011c97@moroto.mountain
Reviewed-by: Wenchao Hao
Signed-off-by: Martin K.
Petersen --- drivers/scsi/scsi_debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 67922e2c4c19..0dd21598f7b6 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1019,7 +1019,7 @@ static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf, struct sdebug_err_inject *inject; struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private; - buf = kmalloc(count, GFP_KERNEL); + buf = kzalloc(count + 1, GFP_KERNEL); if (!buf) return -ENOMEM; From 037fbd3fcfbd99145f9310d93f6637012807cfd0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 6 Nov 2023 17:05:04 +0300 Subject: [PATCH 095/299] scsi: scsi_debug: Delete some bogus error checking Smatch complains that "dentry" is never initialized. These days everyone initializes all their stack variables to zero so this means that it will trigger a warning every time this function is run. Really, debugfs functions are not supposed to be checked for errors in normal code. For example, if we updated this code to check the correct variable then it would print a warning if CONFIG_DEBUGFS was disabled. We don't want that. Just delete the check. Fixes: f084fe52c640 ("scsi: scsi_debug: Add debugfs interface to fail target reset") Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/c602c9ad-5e35-4e18-a47f-87ed956a9ec2@moroto.mountain Reviewed-by: Wenchao Hao Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi_debug.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 0dd21598f7b6..6d8218a44122 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1132,7 +1132,6 @@ static const struct file_operations sdebug_target_reset_fail_fops = { static int sdebug_target_alloc(struct scsi_target *starget) { struct sdebug_target_info *targetip; - struct dentry *dentry; targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL); if (!targetip) @@ -1140,15 +1139,9 @@ static int sdebug_target_alloc(struct scsi_target *starget) targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev), sdebug_debugfs_root); - if (IS_ERR_OR_NULL(targetip->debugfs_entry)) - pr_info("%s: failed to create debugfs directory for target %s\n", - __func__, dev_name(&starget->dev)); debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget, &sdebug_target_reset_fail_fops); - if (IS_ERR_OR_NULL(dentry)) - pr_info("%s: failed to create fail_reset file for target %s\n", - __func__, dev_name(&starget->dev)); starget->hostdata = targetip; From 3b83486399a6a9feb9c681b74c21a227d48d7020 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Mon, 6 Nov 2023 17:13:04 -0600 Subject: [PATCH 096/299] scsi: sd: Fix sshdr use in sd_suspend_common() If scsi_execute_cmd() returns < 0, it doesn't initialize the sshdr, so we shouldn't access the sshdr. If it returns 0, then the cmd executed successfully, so there is no need to check the sshdr. sd_sync_cache() will only access the sshdr if it's been setup because it calls scsi_status_is_check_condition() before accessing it. However, the sd_sync_cache() caller, sd_suspend_common(), does not check. sd_suspend_common() is only checking for ILLEGAL_REQUEST which it's using to determine if the command is supported. If it's not it just ignores the error. So to fix its sshdr use this patch just moves that check to sd_sync_cache() where it converts ILLEGAL_REQUEST to success/0. 
sd_suspend_common() was ignoring that error and sd_shutdown() doesn't check for errors so there will be no behavior changes. Signed-off-by: Mike Christie Link: https://lore.kernel.org/r/20231106231304.5694-2-michael.christie@oracle.com Reviewed-by: Christoph Hellwig Reviewed-by: Martin Wilck Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/scsi/sd.c | 53 ++++++++++++++++++++--------------------------- 1 file changed, 23 insertions(+), 30 deletions(-) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 864db30b9c8e..3ef3b00397fe 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1565,24 +1565,21 @@ out: return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0; } -static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) +static int sd_sync_cache(struct scsi_disk *sdkp) { int retries, res; struct scsi_device *sdp = sdkp->device; const int timeout = sdp->request_queue->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER; - struct scsi_sense_hdr my_sshdr; + struct scsi_sense_hdr sshdr; const struct scsi_exec_args exec_args = { .req_flags = BLK_MQ_REQ_PM, - /* caller might not be interested in sense, but we need it */ - .sshdr = sshdr ? : &my_sshdr, + .sshdr = &sshdr, }; if (!scsi_device_online(sdp)) return -ENODEV; - sshdr = exec_args.sshdr; - for (retries = 3; retries > 0; --retries) { unsigned char cmd[16] = { 0 }; @@ -1607,15 +1604,23 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) return res; if (scsi_status_is_check_condition(res) && - scsi_sense_valid(sshdr)) { - sd_print_sense_hdr(sdkp, sshdr); + scsi_sense_valid(&sshdr)) { + sd_print_sense_hdr(sdkp, &sshdr); /* we need to evaluate the error return */ - if (sshdr->asc == 0x3a || /* medium not present */ - sshdr->asc == 0x20 || /* invalid command */ - (sshdr->asc == 0x74 && sshdr->ascq == 0x71)) /* drive is password locked */ + if (sshdr.asc == 0x3a || /* medium not present */ + sshdr.asc == 0x20 || /* invalid command */ + (sshdr.asc == 0x74 && sshdr.ascq == 0x71)) /* drive is password locked */ /* this is no error here */ return 0; + /* + * This drive doesn't support sync and there's not much + * we can do because this is called during shutdown + * or suspend so just return success so those operations + * can proceed. 
+ */ + if (sshdr.sense_key == ILLEGAL_REQUEST) + return 0; } switch (host_byte(res)) { @@ -3774,7 +3779,7 @@ static void sd_shutdown(struct device *dev) if (sdkp->WCE && sdkp->media_present) { sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); - sd_sync_cache(sdkp, NULL); + sd_sync_cache(sdkp); } if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { @@ -3786,7 +3791,6 @@ static void sd_shutdown(struct device *dev) static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) { struct scsi_disk *sdkp = dev_get_drvdata(dev); - struct scsi_sense_hdr sshdr; int ret = 0; if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ @@ -3795,24 +3799,13 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) if (sdkp->WCE && sdkp->media_present) { if (!sdkp->device->silence_suspend) sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); - ret = sd_sync_cache(sdkp, &sshdr); + ret = sd_sync_cache(sdkp); + /* ignore OFFLINE device */ + if (ret == -ENODEV) + return 0; - if (ret) { - /* ignore OFFLINE device */ - if (ret == -ENODEV) - return 0; - - if (!scsi_sense_valid(&sshdr) || - sshdr.sense_key != ILLEGAL_REQUEST) - return ret; - - /* - * sshdr.sense_key == ILLEGAL_REQUEST means this drive - * doesn't support sync. There's not much to do and - * suspend shouldn't fail. - */ - ret = 0; - } + if (ret) + return ret; } if (sdkp->device->manage_start_stop) { From e439e4a62a8ea3c39d65c546de3af7d1c594077c Mon Sep 17 00:00:00 2001 From: Neil Armstrong Date: Mon, 30 Oct 2023 10:43:11 +0100 Subject: [PATCH 097/299] scsi: ufs: qcom-ufs: dt-bindings: Document the SM8650 UFS Controller Document the UFS Controller on the SM8650 Platform. Reviewed-by: Alim Akhtar Reviewed-by: Manivannan Sadhasivam Signed-off-by: Neil Armstrong Link: https://lore.kernel.org/r/20231030-topic-sm8650-upstream-bindings-ufs-v3-1-a96364463fd5@linaro.org Reviewed-by: Krzysztof Kozlowski Signed-off-by: Martin K. Petersen --- Documentation/devicetree/bindings/ufs/qcom,ufs.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml index 462ead5a1cec..2cf3d016db42 100644 --- a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml +++ b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml @@ -36,6 +36,7 @@ properties: - qcom,sm8350-ufshc - qcom,sm8450-ufshc - qcom,sm8550-ufshc + - qcom,sm8650-ufshc - const: qcom,ufshc - const: jedec,ufs-2.0 @@ -122,6 +123,7 @@ allOf: - qcom,sm8350-ufshc - qcom,sm8450-ufshc - qcom,sm8550-ufshc + - qcom,sm8650-ufshc then: properties: clocks: From ec9aedb2aa1ab7ac420c00b31f5edc5be15ec167 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Mon, 3 Jul 2023 00:28:02 +0800 Subject: [PATCH 098/299] x86/acpi: Ignore invalid x2APIC entries Currently, the kernel enumerates the possible CPUs by parsing both ACPI MADT Local APIC entries and x2APIC entries. So CPUs with "valid" APIC IDs, even if they have duplicated APIC IDs in Local APIC and x2APIC, are always enumerated. Below is what ACPI MADT Local APIC and x2APIC describes on an Ivebridge-EP system, [02Ch 0044 1] Subtable Type : 00 [Processor Local APIC] [02Fh 0047 1] Local Apic ID : 00 ... [164h 0356 1] Subtable Type : 00 [Processor Local APIC] [167h 0359 1] Local Apic ID : 39 [16Ch 0364 1] Subtable Type : 00 [Processor Local APIC] [16Fh 0367 1] Local Apic ID : FF ... [3ECh 1004 1] Subtable Type : 09 [Processor Local x2APIC] [3F0h 1008 4] Processor x2Apic ID : 00000000 ... 
[B5Ch 2908 1] Subtable Type : 09 [Processor Local x2APIC] [B60h 2912 4] Processor x2Apic ID : 00000077 As a result, kernel shows "smpboot: Allowing 168 CPUs, 120 hotplug CPUs". And this wastes significant amount of memory for the per-cpu data. Plus this also breaks https://lore.kernel.org/all/87edm36qqb.ffs@tglx/, because __max_logical_packages is over-estimated by the APIC IDs in the x2APIC entries. According to https://uefi.org/specs/ACPI/6.5/05_ACPI_Software_Programming_Model.html#processor-local-x2apic-structure: "[Compatibility note] On some legacy OSes, Logical processors with APIC ID values less than 255 (whether in XAPIC or X2APIC mode) must use the Processor Local APIC structure to convey their APIC information to OSPM, and those processors must be declared in the DSDT using the Processor() keyword. Logical processors with APIC ID values 255 and greater must use the Processor Local x2APIC structure and be declared using the Device() keyword." Therefore prevent the registration of x2APIC entries with an APIC ID less than 255 if the local APIC table enumerates valid APIC IDs. [ tglx: Simplify the logic ] Signed-off-by: Zhang Rui Signed-off-by: Thomas Gleixner Tested-by: Peter Zijlstra Link: https://lore.kernel.org/r/20230702162802.344176-1-rui.zhang@intel.com --- arch/x86/kernel/acpi/boot.c | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index c55c0ef47a18..fc5bce1b5047 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -63,6 +63,7 @@ int acpi_fix_pin2_polarity __initdata; #ifdef CONFIG_X86_LOCAL_APIC static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; +static bool has_lapic_cpus __initdata; static bool acpi_support_online_capable; #endif @@ -232,6 +233,14 @@ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end) if (!acpi_is_processor_usable(processor->lapic_flags)) return 0; + /* + * According to https://uefi.org/specs/ACPI/6.5/05_ACPI_Software_Programming_Model.html#processor-local-x2apic-structure + * when MADT provides both valid LAPIC and x2APIC entries, the APIC ID + * in x2APIC must be equal or greater than 0xff. + */ + if (has_lapic_cpus && apic_id < 0xff) + return 0; + /* * We need to register disabled CPU as well to permit * counting disabled CPUs. 
This allows us to size @@ -1114,10 +1123,7 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void) static int __init acpi_parse_madt_lapic_entries(void) { - int count; - int x2count = 0; - int ret; - struct acpi_subtable_proc madt_proc[2]; + int count, x2count = 0; if (!boot_cpu_has(X86_FEATURE_APIC)) return -ENODEV; @@ -1126,21 +1132,11 @@ static int __init acpi_parse_madt_lapic_entries(void) acpi_parse_sapic, MAX_LOCAL_APIC); if (!count) { - memset(madt_proc, 0, sizeof(madt_proc)); - madt_proc[0].id = ACPI_MADT_TYPE_LOCAL_APIC; - madt_proc[0].handler = acpi_parse_lapic; - madt_proc[1].id = ACPI_MADT_TYPE_LOCAL_X2APIC; - madt_proc[1].handler = acpi_parse_x2apic; - ret = acpi_table_parse_entries_array(ACPI_SIG_MADT, - sizeof(struct acpi_table_madt), - madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC); - if (ret < 0) { - pr_err("Error parsing LAPIC/X2APIC entries\n"); - return ret; - } - - count = madt_proc[0].count; - x2count = madt_proc[1].count; + count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, + acpi_parse_lapic, MAX_LOCAL_APIC); + has_lapic_cpus = count > 0; + x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC, + acpi_parse_x2apic, MAX_LOCAL_APIC); } if (!count && !x2count) { pr_err("No LAPIC entries present\n"); From 4b21a669ca21ed8f24ef4530b2918be5730114de Mon Sep 17 00:00:00 2001 From: Kailang Yang Date: Fri, 10 Nov 2023 15:16:06 +0800 Subject: [PATCH 099/299] ALSA: hda/realtek - Add Dell ALC295 to pin fall back table Add ALC295 to pin fall back table. Remove 5 pin quirks for Dell ALC295. ALC295 was only support MIC2 for external MIC function. ALC295 assigned model "ALC269_FIXUP_DELL1_MIC_NO_PRESENCE" for pin fall back table. It was assigned wrong model. So, let's remove it. Fixes: fbc571290d9f ("ALSA: hda/realtek - Fixed Headphone Mic can't record on Dell platform") Signed-off-by: Kailang Yang Cc: Link: https://lore.kernel.org/r/7c1998e873834df98d59bd7e0d08c72e@realtek.com Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 669ae3d6e447..d689f0050aae 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -10821,22 +10821,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0x90a60130}, {0x17, 0x90170110}, {0x21, 0x03211020}), - SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, - {0x14, 0x90170110}, - {0x21, 0x04211020}), - SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, - {0x14, 0x90170110}, - {0x21, 0x04211030}), - SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, - ALC295_STANDARD_PINS, - {0x17, 0x21014020}, - {0x18, 0x21a19030}), - SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, - ALC295_STANDARD_PINS, - {0x17, 0x21014040}, - {0x18, 0x21a19050}), - SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, - ALC295_STANDARD_PINS), SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ALC298_STANDARD_PINS, {0x17, 0x90170110}), @@ -10880,6 +10864,9 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = { SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, {0x19, 0x40000000}, {0x1b, 0x40000000}), + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, + {0x19, 0x40000000}, + {0x1b, 
0x40000000}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x19, 0x40000000}, {0x1a, 0x40000000}),

From 8384c0baf223e1c3bc7b1c711d80a4c6106d210e Mon Sep 17 00:00:00 2001 From: Eymen Yigit Date: Fri, 10 Nov 2023 18:07:15 +0300 Subject: [PATCH 100/299] ALSA: hda/realtek: Enable Mute LED on HP 255 G8 This HP notebook uses the ALC236 codec, with COEF 0x07 idx 1 controlling the mute LED. Enable the already existing quirk for this device. Signed-off-by: Eymen Yigit Cc: Luka Guzenko Cc: Link: https://lore.kernel.org/r/20231110150715.5141-1-eymenyg01@gmail.com Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index d689f0050aae..c7de74396185 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -9832,6 +9832,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x890e, "HP 255 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x8919, "HP Pavilion Aero Laptop 13-be0xxx", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x896d, "HP ZBook Firefly 16 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),

From c3803203bc5ec910a3eb06172cf6fb368e0e4390 Mon Sep 17 00:00:00 2001 From: Ani Sinha Date: Mon, 16 Oct 2023 19:01:22 +0530 Subject: [PATCH 101/299] hv/hv_kvp_daemon: Some small fixes for handling NM keyfiles Some small fixes: - let's make sure we are not adding IPv4 addresses to the [ipv6] section of the keyfile, and vice versa. - ADDR_FAMILY_IPV6 is a bit in addr_family. Test that bit instead of checking the whole value of addr_family. - Some trivial fixes in hv_set_ifconfig.sh. These fixes are proposed after doing some internal testing at Red Hat.
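For readers following the keyfile change, the address-family test the fix relies on can be sketched with inet_pton(); this is only an illustrative stand-in, not the daemon's actual is_ipv4() helper, which may be implemented differently:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>

/* Classify an address string so it is only written to the matching
 * [ipv4]/[ipv6] keyfile section. Hypothetical helper, not patch code. */
static bool addr_is_ipv4(const char *addr)
{
	struct in_addr a4;

	return inet_pton(AF_INET, addr, &a4) == 1;
}

static bool addr_is_ipv6(const char *addr)
{
	struct in6_addr a6;

	return inet_pton(AF_INET6, addr, &a6) == 1;
}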
CC: Shradha Gupta CC: Saurabh Sengar Fixes: 42999c904612 ("hv/hv_kvp_daemon:Support for keyfile based connection profile") Signed-off-by: Ani Sinha Reviewed-by: Shradha Gupta Signed-off-by: Wei Liu Message-ID: <20231016133122.2419537-1-anisinha@redhat.com> --- tools/hv/hv_kvp_daemon.c | 20 ++++++++++++-------- tools/hv/hv_set_ifconfig.sh | 4 ++-- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c index 264eeb9c46a9..318e2dad27e0 100644 --- a/tools/hv/hv_kvp_daemon.c +++ b/tools/hv/hv_kvp_daemon.c @@ -1421,7 +1421,7 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val) if (error) goto setval_error; - if (new_val->addr_family == ADDR_FAMILY_IPV6) { + if (new_val->addr_family & ADDR_FAMILY_IPV6) { error = fprintf(nmfile, "\n[ipv6]\n"); if (error < 0) goto setval_error; @@ -1455,14 +1455,18 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val) if (error < 0) goto setval_error; - error = fprintf(nmfile, "gateway=%s\n", (char *)new_val->gate_way); - if (error < 0) - goto setval_error; - - error = fprintf(nmfile, "dns=%s\n", (char *)new_val->dns_addr); - if (error < 0) - goto setval_error; + /* we do not want ipv4 addresses in ipv6 section and vice versa */ + if (is_ipv6 != is_ipv4((char *)new_val->gate_way)) { + error = fprintf(nmfile, "gateway=%s\n", (char *)new_val->gate_way); + if (error < 0) + goto setval_error; + } + if (is_ipv6 != is_ipv4((char *)new_val->dns_addr)) { + error = fprintf(nmfile, "dns=%s\n", (char *)new_val->dns_addr); + if (error < 0) + goto setval_error; + } fclose(nmfile); fclose(ifcfg_file); diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh index ae5a7a8249a2..440a91b35823 100755 --- a/tools/hv/hv_set_ifconfig.sh +++ b/tools/hv/hv_set_ifconfig.sh @@ -53,7 +53,7 @@ # or "manual" if no boot-time protocol should be used) # # address1=ipaddr1/plen -# address=ipaddr2/plen +# address2=ipaddr2/plen # # gateway=gateway1;gateway2 # @@ -61,7 +61,7 @@ # # [ipv6] # address1=ipaddr1/plen -# address2=ipaddr1/plen +# address2=ipaddr2/plen # # gateway=gateway1;gateway2 # From 5c0930ccaad5a74d74e8b18b648c5eb21ed2fe94 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Nov 2023 15:57:13 +0100 Subject: [PATCH 102/299] hrtimers: Push pending hrtimers away from outgoing CPU earlier 2b8272ff4a70 ("cpu/hotplug: Prevent self deadlock on CPU hot-unplug") solved the straight forward CPU hotplug deadlock vs. the scheduler bandwidth timer. Yu discovered a more involved variant where a task which has a bandwidth timer started on the outgoing CPU holds a lock and then gets throttled. If the lock required by one of the CPU hotplug callbacks the hotplug operation deadlocks because the unthrottling timer event is not handled on the dying CPU and can only be recovered once the control CPU reaches the hotplug state which pulls the pending hrtimers from the dead CPU. Solve this by pushing the hrtimers away from the dying CPU in the dying callbacks. Nothing can queue a hrtimer on the dying CPU at that point because all other CPUs spin in stop_machine() with interrupts disabled and once the operation is finished the CPU is marked offline. 
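As background for the scenario above: the bandwidth timer in question is an hrtimer armed on a particular CPU, and pinned hrtimers stay queued on the arming CPU's per-CPU clock base, which is why pending timers must be pulled off a CPU before it finishes going offline. A minimal, illustrative sketch of such a timer (not code from this patch):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer throttle_timer;

static enum hrtimer_restart throttle_fn(struct hrtimer *t)
{
	/* unthrottling work would run here */
	return HRTIMER_NORESTART;
}

static void arm_throttle_timer(void)
{
	/* Relative and pinned: queued on the current CPU's hrtimer base */
	hrtimer_init(&throttle_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	throttle_timer.function = throttle_fn;
	hrtimer_start(&throttle_timer, ms_to_ktime(5), HRTIMER_MODE_REL_PINNED);
}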
Reported-by: Yu Liao Signed-off-by: Thomas Gleixner Tested-by: Liu Tie Link: https://lore.kernel.org/r/87a5rphara.ffs@tglx --- include/linux/cpuhotplug.h | 1 + include/linux/hrtimer.h | 4 ++-- kernel/cpu.c | 8 +++++++- kernel/time/hrtimer.c | 33 ++++++++++++--------------------- 4 files changed, 22 insertions(+), 24 deletions(-) diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 068f7738be22..448f5f995adc 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -193,6 +193,7 @@ enum cpuhp_state { CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, CPUHP_AP_ARM64_ISNDEP_STARTING, CPUHP_AP_SMPCFD_DYING, + CPUHP_AP_HRTIMERS_DYING, CPUHP_AP_X86_TBOOT_DYING, CPUHP_AP_ARM_CACHE_B15_RAC_DYING, CPUHP_AP_ONLINE, diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 0ee140176f10..f2044d5a652b 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -531,9 +531,9 @@ extern void sysrq_timer_list_show(void); int hrtimers_prepare_cpu(unsigned int cpu); #ifdef CONFIG_HOTPLUG_CPU -int hrtimers_dead_cpu(unsigned int cpu); +int hrtimers_cpu_dying(unsigned int cpu); #else -#define hrtimers_dead_cpu NULL +#define hrtimers_cpu_dying NULL #endif #endif diff --git a/kernel/cpu.c b/kernel/cpu.c index 6de7c6bb74ee..2e69a1deaa31 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -2098,7 +2098,7 @@ static struct cpuhp_step cpuhp_hp_states[] = { [CPUHP_HRTIMERS_PREPARE] = { .name = "hrtimers:prepare", .startup.single = hrtimers_prepare_cpu, - .teardown.single = hrtimers_dead_cpu, + .teardown.single = NULL, }, [CPUHP_SMPCFD_PREPARE] = { .name = "smpcfd:prepare", @@ -2190,6 +2190,12 @@ static struct cpuhp_step cpuhp_hp_states[] = { .startup.single = NULL, .teardown.single = smpcfd_dying_cpu, }, + [CPUHP_AP_HRTIMERS_DYING] = { + .name = "hrtimers:dying", + .startup.single = NULL, + .teardown.single = hrtimers_cpu_dying, + }, + /* Entry state on starting. Interrupts enabled from here on. Transient * state for synchronsization */ [CPUHP_AP_ONLINE] = { diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 238262e4aba7..760793998cdd 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -2219,29 +2219,22 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, } } -int hrtimers_dead_cpu(unsigned int scpu) +int hrtimers_cpu_dying(unsigned int dying_cpu) { struct hrtimer_cpu_base *old_base, *new_base; - int i; + int i, ncpu = cpumask_first(cpu_active_mask); - BUG_ON(cpu_online(scpu)); - tick_cancel_sched_timer(scpu); + tick_cancel_sched_timer(dying_cpu); + + old_base = this_cpu_ptr(&hrtimer_bases); + new_base = &per_cpu(hrtimer_bases, ncpu); - /* - * this BH disable ensures that raise_softirq_irqoff() does - * not wakeup ksoftirqd (and acquire the pi-lock) while - * holding the cpu_base lock - */ - local_bh_disable(); - local_irq_disable(); - old_base = &per_cpu(hrtimer_bases, scpu); - new_base = this_cpu_ptr(&hrtimer_bases); /* * The caller is globally serialized and nobody else * takes two locks at once, deadlock is not possible. */ - raw_spin_lock(&new_base->lock); - raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock(&old_base->lock); + raw_spin_lock_nested(&new_base->lock, SINGLE_DEPTH_NESTING); for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { migrate_hrtimer_list(&old_base->clock_base[i], @@ -2252,15 +2245,13 @@ int hrtimers_dead_cpu(unsigned int scpu) * The migration might have changed the first expiring softirq * timer on this CPU. Update it. 
*/ - hrtimer_update_softirq_timer(new_base, false); + __hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT); + /* Tell the other CPU to retrigger the next event */ + smp_call_function_single(ncpu, retrigger_next_event, NULL, 0); - raw_spin_unlock(&old_base->lock); raw_spin_unlock(&new_base->lock); + raw_spin_unlock(&old_base->lock); - /* Check, if we got expired work to do */ - __hrtimer_peek_ahead_timers(); - local_irq_enable(); - local_bh_enable(); return 0; } From 713f040cd22285fcc506f40a0d259566e6758c3c Mon Sep 17 00:00:00 2001 From: Chandradeep Dey Date: Sat, 11 Nov 2023 19:25:49 +0100 Subject: [PATCH 103/299] ALSA: hda/realtek - Enable internal speaker of ASUS K6500ZC Apply the already existing quirk chain ALC294_FIXUP_ASUS_SPK to enable the internal speaker of ASUS K6500ZC. Signed-off-by: Chandradeep Dey Cc: Link: https://lore.kernel.org/r/NizcVHQ--3-9@chandradeepdey.com Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index c7de74396185..cdd808e02b44 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -9907,6 +9907,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK), SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK), SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), From 7e8037b099c0bbe8f2109dc452dbcab8d400fc53 Mon Sep 17 00:00:00 2001 From: Saurabh Sengar Date: Sat, 11 Nov 2023 00:37:47 -0800 Subject: [PATCH 104/299] x86/hyperv: Fix the detection of E820_TYPE_PRAM in a Gen2 VM A Gen2 VM doesn't support legacy PCI/PCIe, so both raw_pci_ops and raw_pci_ext_ops are NULL, and pci_subsys_init() -> pcibios_init() doesn't call pcibios_resource_survey() -> e820__reserve_resources_late(); as a result, any emulated persistent memory of E820_TYPE_PRAM (12) via the kernel parameter memmap=nn[KMG]!ss is not added into iomem_resource and hence can't be detected by register_e820_pmem(). Fix this by directly calling e820__reserve_resources_late() in hv_pci_init(), which is called from arch_initcall(pci_arch_init). It's ok to move a Gen2 VM's e820__reserve_resources_late() from subsys_initcall(pci_subsys_init) to arch_initcall(pci_arch_init) because the code in-between doesn't depend on the E820 resources. e820__reserve_resources_late() depends on e820__reserve_resources(), which has been called earlier from setup_arch(). For a Gen-2 VM, the new hv_pci_init() also adds any memory of E820_TYPE_PMEM (7) into iomem_resource, and acpi_nfit_register_region() -> acpi_nfit_insert_resource() -> region_intersects() returns REGION_INTERSECTS, so the memory of E820_TYPE_PMEM won't get added twice. Changed the local variable "int gen2vm" to "bool gen2vm". 
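To make the resource bookkeeping concrete: what e820__reserve_resources_late() effectively provides for a memmap=nn[KMG]!ss range is an entry in iomem_resource that register_e820_pmem() can later find. A rough sketch of that step, with an assumed resource name, not the hv_init.c code:

#include <linux/ioport.h>

static struct resource pram_res = {
	.name  = "Persistent Memory (legacy)",	/* E820_TYPE_PRAM range */
	.flags = IORESOURCE_MEM,
};

static int publish_pram_range(resource_size_t start, resource_size_t size)
{
	pram_res.start = start;
	pram_res.end   = start + size - 1;

	/* Make the range visible to later pmem region registration */
	return insert_resource(&iomem_resource, &pram_res);
}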
Signed-off-by: Saurabh Sengar Signed-off-by: Dexuan Cui Signed-off-by: Wei Liu Message-ID: <1699691867-9827-1-git-send-email-ssengar@linux.microsoft.com> --- arch/x86/hyperv/hv_init.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 21556ad87f4b..8f3a4d16bb79 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -286,15 +287,31 @@ static int hv_cpu_die(unsigned int cpu) static int __init hv_pci_init(void) { - int gen2vm = efi_enabled(EFI_BOOT); + bool gen2vm = efi_enabled(EFI_BOOT); /* - * For Generation-2 VM, we exit from pci_arch_init() by returning 0. - * The purpose is to suppress the harmless warning: + * A Generation-2 VM doesn't support legacy PCI/PCIe, so both + * raw_pci_ops and raw_pci_ext_ops are NULL, and pci_subsys_init() -> + * pcibios_init() doesn't call pcibios_resource_survey() -> + * e820__reserve_resources_late(); as a result, any emulated persistent + * memory of E820_TYPE_PRAM (12) via the kernel parameter + * memmap=nn[KMG]!ss is not added into iomem_resource and hence can't be + * detected by register_e820_pmem(). Fix this by directly calling + * e820__reserve_resources_late() here: e820__reserve_resources_late() + * depends on e820__reserve_resources(), which has been called earlier + * from setup_arch(). Note: e820__reserve_resources_late() also adds + * any memory of E820_TYPE_PMEM (7) into iomem_resource, and + * acpi_nfit_register_region() -> acpi_nfit_insert_resource() -> + * region_intersects() returns REGION_INTERSECTS, so the memory of + * E820_TYPE_PMEM won't get added twice. + * + * We return 0 here so that pci_arch_init() won't print the warning: * "PCI: Fatal: No config space access function found" */ - if (gen2vm) + if (gen2vm) { + e820__reserve_resources_late(); return 0; + } /* For Generation-1 VM, we'll proceed in pci_arch_init(). */ return 1;

From 7b211c7671212cad0b83603c674838c7e824d845 Mon Sep 17 00:00:00 2001 From: Robert Marko Date: Fri, 10 Nov 2023 10:30:11 +0100 Subject: [PATCH 105/299] Revert "i2c: pxa: move to generic GPIO recovery" This reverts commit 0b01392c18b9993a584f36ace1d61118772ad0ca. Conversion of PXA to generic I2C recovery makes the I2C bus completely lock up if recovery pinctrl is present in the DT and I2C recovery is enabled. So, until the generic I2C recovery can also work with PXA, let's revert to have working I2C and I2C recovery again.
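For context on what "generic GPIO recovery" means here: the i2c core lets a bus driver hand SCL/SDA over as GPIOs and reuse i2c_generic_scl_recovery() to clock out a stuck slave. A minimal sketch of that wiring, with illustrative names rather than the PXA code:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/gpio/consumer.h>

static int demo_init_recovery(struct device *dev, struct i2c_adapter *adap,
			      struct i2c_bus_recovery_info *bri)
{
	/* SCL/SDA claimed as open-drain GPIOs for bit-banged recovery */
	bri->scl_gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
	if (IS_ERR(bri->scl_gpiod))
		return PTR_ERR(bri->scl_gpiod);

	bri->sda_gpiod = devm_gpiod_get(dev, "sda", GPIOD_OUT_HIGH_OPEN_DRAIN);
	if (IS_ERR(bri->sda_gpiod))
		return PTR_ERR(bri->sda_gpiod);

	/* Let the i2c core toggle the lines during recovery */
	bri->recover_bus = i2c_generic_scl_recovery;
	adap->bus_recovery_info = bri;

	return 0;
}

The revert below restores PXA's driver-local handling of the pinctrl and GPIO hand-over instead of relying on this generic path.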
Signed-off-by: Robert Marko Cc: stable@vger.kernel.org # 5.11+ Acked-by: Andi Shyti Acked-by: Russell King (Oracle) Acked-by: Linus Walleij Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-pxa.c | 76 ++++++++++++++++++++++++++++++++---- 1 file changed, 68 insertions(+), 8 deletions(-) diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 1d7648242749..76f79b68cef8 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -265,6 +265,9 @@ struct pxa_i2c { u32 hs_mask; struct i2c_bus_recovery_info recovery; + struct pinctrl *pinctrl; + struct pinctrl_state *pinctrl_default; + struct pinctrl_state *pinctrl_recovery; }; #define _IBMR(i2c) ((i2c)->reg_ibmr) @@ -1299,12 +1302,13 @@ static void i2c_pxa_prepare_recovery(struct i2c_adapter *adap) */ gpiod_set_value(i2c->recovery.scl_gpiod, ibmr & IBMR_SCLS); gpiod_set_value(i2c->recovery.sda_gpiod, ibmr & IBMR_SDAS); + + WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery)); } static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap) { struct pxa_i2c *i2c = adap->algo_data; - struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; u32 isr; /* @@ -1318,7 +1322,7 @@ static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap) i2c_pxa_do_reset(i2c); } - WARN_ON(pinctrl_select_state(bri->pinctrl, bri->pins_default)); + WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default)); dev_dbg(&i2c->adap.dev, "recovery: IBMR 0x%08x ISR 0x%08x\n", readl(_IBMR(i2c)), readl(_ISR(i2c))); @@ -1340,20 +1344,76 @@ static int i2c_pxa_init_recovery(struct pxa_i2c *i2c) if (IS_ENABLED(CONFIG_I2C_PXA_SLAVE)) return 0; - bri->pinctrl = devm_pinctrl_get(dev); - if (PTR_ERR(bri->pinctrl) == -ENODEV) { - bri->pinctrl = NULL; + i2c->pinctrl = devm_pinctrl_get(dev); + if (PTR_ERR(i2c->pinctrl) == -ENODEV) + i2c->pinctrl = NULL; + if (IS_ERR(i2c->pinctrl)) + return PTR_ERR(i2c->pinctrl); + + if (!i2c->pinctrl) + return 0; + + i2c->pinctrl_default = pinctrl_lookup_state(i2c->pinctrl, + PINCTRL_STATE_DEFAULT); + i2c->pinctrl_recovery = pinctrl_lookup_state(i2c->pinctrl, "recovery"); + + if (IS_ERR(i2c->pinctrl_default) || IS_ERR(i2c->pinctrl_recovery)) { + dev_info(dev, "missing pinmux recovery information: %ld %ld\n", + PTR_ERR(i2c->pinctrl_default), + PTR_ERR(i2c->pinctrl_recovery)); + return 0; + } + + /* + * Claiming GPIOs can influence the pinmux state, and may glitch the + * I2C bus. Do this carefully. + */ + bri->scl_gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN); + if (bri->scl_gpiod == ERR_PTR(-EPROBE_DEFER)) + return -EPROBE_DEFER; + if (IS_ERR(bri->scl_gpiod)) { + dev_info(dev, "missing scl gpio recovery information: %pe\n", + bri->scl_gpiod); + return 0; + } + + /* + * We have SCL. Pull SCL low and wait a bit so that SDA glitches + * have no effect. + */ + gpiod_direction_output(bri->scl_gpiod, 0); + udelay(10); + bri->sda_gpiod = devm_gpiod_get(dev, "sda", GPIOD_OUT_HIGH_OPEN_DRAIN); + + /* Wait a bit in case of a SDA glitch, and then release SCL. 
*/ + udelay(10); + gpiod_direction_output(bri->scl_gpiod, 1); + + if (bri->sda_gpiod == ERR_PTR(-EPROBE_DEFER)) + return -EPROBE_DEFER; + + if (IS_ERR(bri->sda_gpiod)) { + dev_info(dev, "missing sda gpio recovery information: %pe\n", + bri->sda_gpiod); + return 0; } - if (IS_ERR(bri->pinctrl)) - return PTR_ERR(bri->pinctrl); bri->prepare_recovery = i2c_pxa_prepare_recovery; bri->unprepare_recovery = i2c_pxa_unprepare_recovery; + bri->recover_bus = i2c_generic_scl_recovery; i2c->adap.bus_recovery_info = bri; - return 0; + /* + * Claiming GPIOs can change the pinmux state, which confuses the + * pinctrl since pinctrl's idea of the current setting is unaffected + * by the pinmux change caused by claiming the GPIO. Work around that + * by switching pinctrl to the GPIO state here. We do it this way to + * avoid glitching the I2C bus. + */ + pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery); + + return pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default); } static int i2c_pxa_probe(struct platform_device *dev)

From 2a5db859c6825b5d50377dda9c3cc729c20cad43 Mon Sep 17 00:00:00 2001 From: Long Li Date: Mon, 31 Jul 2023 20:46:17 +0800 Subject: [PATCH 106/299] xfs: factor out xfs_defer_pending_abort Factor out xfs_defer_pending_abort() from xfs_defer_trans_abort(), which does not use the transaction parameter, so it can be used after the transaction life cycle has ended. Signed-off-by: Long Li Reviewed-by: Darrick J. Wong Signed-off-by: Chandan Babu R --- fs/xfs/libxfs/xfs_defer.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c index bcfb6a4203cd..88388e12f8e7 100644 --- a/fs/xfs/libxfs/xfs_defer.c +++ b/fs/xfs/libxfs/xfs_defer.c @@ -245,26 +245,33 @@ xfs_defer_create_intents( return ret; } +STATIC void +xfs_defer_pending_abort( + struct xfs_mount *mp, + struct list_head *dop_list) +{ + struct xfs_defer_pending *dfp; + const struct xfs_defer_op_type *ops; + + /* Abort intent items that don't have a done item. */ + list_for_each_entry(dfp, dop_list, dfp_list) { + ops = defer_op_types[dfp->dfp_type]; + trace_xfs_defer_pending_abort(mp, dfp); + if (dfp->dfp_intent && !dfp->dfp_done) { + ops->abort_intent(dfp->dfp_intent); + dfp->dfp_intent = NULL; + } + } +} + /* Abort all the intents that were committed. */ STATIC void xfs_defer_trans_abort( struct xfs_trans *tp, struct list_head *dop_pending) { - struct xfs_defer_pending *dfp; - const struct xfs_defer_op_type *ops; - trace_xfs_defer_trans_abort(tp, _RET_IP_); - - /* Abort intent items that don't have a done item. */ - list_for_each_entry(dfp, dop_pending, dfp_list) { - ops = defer_op_types[dfp->dfp_type]; - trace_xfs_defer_pending_abort(tp->t_mountp, dfp); - if (dfp->dfp_intent && !dfp->dfp_done) { - ops->abort_intent(dfp->dfp_intent); - dfp->dfp_intent = NULL; - } - } + xfs_defer_pending_abort(tp->t_mountp, dop_pending); } /*

From f8f9d952e42dd49ae534f61f2fa7ca0876cb9848 Mon Sep 17 00:00:00 2001 From: Long Li Date: Mon, 31 Jul 2023 20:46:18 +0800 Subject: [PATCH 107/299] xfs: abort intent items when recovery intents fail When recovering intents, we capture newly created intent items as part of committing recovered intent items.
If intent recovery fails at a later point, we forget to remove those newly created intent items from the AIL and hang: [root@localhost ~]# cat /proc/539/stack [<0>] xfs_ail_push_all_sync+0x174/0x230 [<0>] xfs_unmount_flush_inodes+0x8d/0xd0 [<0>] xfs_mountfs+0x15f7/0x1e70 [<0>] xfs_fs_fill_super+0x10ec/0x1b20 [<0>] get_tree_bdev+0x3c8/0x730 [<0>] vfs_get_tree+0x89/0x2c0 [<0>] path_mount+0xecf/0x1800 [<0>] do_mount+0xf3/0x110 [<0>] __x64_sys_mount+0x154/0x1f0 [<0>] do_syscall_64+0x39/0x80 [<0>] entry_SYSCALL_64_after_hwframe+0x63/0xcd When newly created intent items fail to commit via transaction, intent recovery hasn't created done items for these newly created intent items, so the capture structure is the sole owner of the captured intent items. We must release them explicitly or else they leak: unreferenced object 0xffff888016719108 (size 432): comm "mount", pid 529, jiffies 4294706839 (age 144.463s) hex dump (first 32 bytes): 08 91 71 16 80 88 ff ff 08 91 71 16 80 88 ff ff ..q.......q..... 18 91 71 16 80 88 ff ff 18 91 71 16 80 88 ff ff ..q.......q..... backtrace: [] xfs_efi_init+0x18f/0x1d0 [] xfs_extent_free_create_intent+0x50/0x150 [] xfs_defer_create_intents+0x16a/0x340 [] xfs_defer_ops_capture_and_commit+0x8e/0xad0 [] xfs_cui_item_recover+0x819/0x980 [] xlog_recover_process_intents+0x246/0xb70 [] xlog_recover_finish+0x8a/0x9a0 [] xfs_log_mount_finish+0x2bb/0x4a0 [] xfs_mountfs+0x14bf/0x1e70 [] xfs_fs_fill_super+0x10d0/0x1b20 [] get_tree_bdev+0x3d2/0x6d0 [] vfs_get_tree+0x89/0x2c0 [] path_mount+0xecf/0x1800 [] do_mount+0xf3/0x110 [] __x64_sys_mount+0x154/0x1f0 [] do_syscall_64+0x39/0x80 Fix the problem above by abort intent items that don't have a done item when recovery intents fail. Fixes: e6fff81e4870 ("xfs: proper replay of deferred ops queued during log recovery") Signed-off-by: Long Li Reviewed-by: Darrick J. Wong Signed-off-by: Chandan Babu R --- fs/xfs/libxfs/xfs_defer.c | 5 +++-- fs/xfs/libxfs/xfs_defer.h | 2 +- fs/xfs/xfs_log_recover.c | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c index 88388e12f8e7..f71679ce23b9 100644 --- a/fs/xfs/libxfs/xfs_defer.c +++ b/fs/xfs/libxfs/xfs_defer.c @@ -763,12 +763,13 @@ xfs_defer_ops_capture( /* Release all resources that we used to capture deferred ops. */ void -xfs_defer_ops_capture_free( +xfs_defer_ops_capture_abort( struct xfs_mount *mp, struct xfs_defer_capture *dfc) { unsigned short i; + xfs_defer_pending_abort(mp, &dfc->dfc_dfops); xfs_defer_cancel_list(mp, &dfc->dfc_dfops); for (i = 0; i < dfc->dfc_held.dr_bufs; i++) @@ -809,7 +810,7 @@ xfs_defer_ops_capture_and_commit( /* Commit the transaction and add the capture structure to the list. 
*/ error = xfs_trans_commit(tp); if (error) { - xfs_defer_ops_capture_free(mp, dfc); + xfs_defer_ops_capture_abort(mp, dfc); return error; } diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h index 114a3a4930a3..8788ad5f6a73 100644 --- a/fs/xfs/libxfs/xfs_defer.h +++ b/fs/xfs/libxfs/xfs_defer.h @@ -121,7 +121,7 @@ int xfs_defer_ops_capture_and_commit(struct xfs_trans *tp, struct list_head *capture_list); void xfs_defer_ops_continue(struct xfs_defer_capture *d, struct xfs_trans *tp, struct xfs_defer_resources *dres); -void xfs_defer_ops_capture_free(struct xfs_mount *mp, +void xfs_defer_ops_capture_abort(struct xfs_mount *mp, struct xfs_defer_capture *d); void xfs_defer_resources_rele(struct xfs_defer_resources *dres); diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 13b94d2e605b..a1e18b24971a 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2511,7 +2511,7 @@ xlog_abort_defer_ops( list_for_each_entry_safe(dfc, next, capture_list, dfc_list) { list_del_init(&dfc->dfc_list); - xfs_defer_ops_capture_free(mp, dfc); + xfs_defer_ops_capture_abort(mp, dfc); } } From 00080503612f61d1ad67be641ed9cb4f9f6ba40e Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 19 Sep 2023 17:12:45 -0400 Subject: [PATCH 108/299] XFS: Update MAINTAINERS to catch all XFS documentation Assumes that all XFS documentation will be prefixed with xfs-, which seems like a good policy anyway. Reviewed-by: Dave Chinner Reviewed-by: Darrick J. Wong Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Chandan Babu R --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 97f51d5ec1cf..a78795dc9aee 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23872,8 +23872,7 @@ T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git P: Documentation/filesystems/xfs-maintainer-entry-profile.rst F: Documentation/ABI/testing/sysfs-fs-xfs F: Documentation/admin-guide/xfs.rst -F: Documentation/filesystems/xfs-delayed-logging-design.rst -F: Documentation/filesystems/xfs-self-describing-metadata.rst +F: Documentation/filesystems/xfs-* F: fs/xfs/ F: include/uapi/linux/dqblk_xfs.h F: include/uapi/linux/fsmap.h From 55f669f34184ecb25b8353f29c7f6f1ae5b313d1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 16 Oct 2023 17:28:52 +0200 Subject: [PATCH 109/299] xfs: only remap the written blocks in xfs_reflink_end_cow_extent xfs_reflink_end_cow_extent looks up the COW extent and the data fork extent at offset_fsb, and then proceeds to remap the common subset between the two. It does however not limit the remapped extent to the passed in [*offset_fsbm end_fsb] range and thus potentially remaps more blocks than the one handled by the current I/O completion. This means that with sufficiently large data and COW extents we could be remapping COW fork mappings that have not been written to, leading to a stale data exposure on a powerfail event. We use to have a xfs_trim_range to make the remap fit the I/O completion range, but that got (apparently accidentally) removed in commit df2fd88f8ac7 ("xfs: rewrite xfs_reflink_end_cow to use intents"). Note that I've only found this by code inspection, and a test case would probably require very specific delay and error injection. Fixes: df2fd88f8ac7 ("xfs: rewrite xfs_reflink_end_cow to use intents") Signed-off-by: Christoph Hellwig Reviewed-by: "Darrick J. 
Wong" Signed-off-by: Chandan Babu R --- fs/xfs/xfs_reflink.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 658edee8381d..e5b62dc28466 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -784,6 +784,7 @@ xfs_reflink_end_cow_extent( } } del = got; + xfs_trim_extent(&del, *offset_fsb, end_fsb - *offset_fsb); /* Grab the corresponding mapping in the data fork. */ nmaps = 1; From 471de20303dda0b67981e06d59cc6c4a83fd2a3c Mon Sep 17 00:00:00 2001 From: Leah Rumancik Date: Mon, 30 Oct 2023 13:33:49 -0700 Subject: [PATCH 110/299] xfs: up(ic_sema) if flushing data device fails We flush the data device cache before we issue external log IO. If the flush fails, we shut down the log immediately and return. However, the iclog->ic_sema is left in a decremented state so let's add an up(). Prior to this patch, xfs/438 would fail consistently when running with an external log device: sync -> xfs_log_force -> xlog_write_iclog -> down(&iclog->ic_sema) -> blkdev_issue_flush (fail causes us to intiate shutdown) -> xlog_force_shutdown -> return unmount -> xfs_log_umount -> xlog_wait_iclog_completion -> down(&iclog->ic_sema) --------> HANG There is a second early return / shutdown. Make sure the up() happens for it as well. Also make sure we cleanup the iclog state, xlog_state_done_syncing, before dropping the iclog lock. Fixes: b5d721eaae47 ("xfs: external logs need to flush data device") Fixes: 842a42d126b4 ("xfs: shutdown on failure to add page to log bio") Fixes: 7d839e325af2 ("xfs: check return codes when flushing block devices") Signed-off-by: Leah Rumancik Reviewed-by: "Darrick J. Wong" Signed-off-by: Chandan Babu R --- fs/xfs/xfs_log.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 51c100c86177..ee206facf0dc 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1893,9 +1893,7 @@ xlog_write_iclog( * the buffer manually, the code needs to be kept in sync * with the I/O completion path. */ - xlog_state_done_syncing(iclog); - up(&iclog->ic_sema); - return; + goto sync; } /* @@ -1925,20 +1923,17 @@ xlog_write_iclog( * avoid shutdown re-entering this path and erroring out again. */ if (log->l_targ != log->l_mp->m_ddev_targp && - blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev)) { - xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); - return; - } + blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev)) + goto shutdown; } if (iclog->ic_flags & XLOG_ICL_NEED_FUA) iclog->ic_bio.bi_opf |= REQ_FUA; iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA); - if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) { - xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); - return; - } + if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) + goto shutdown; + if (is_vmalloc_addr(iclog->ic_data)) flush_kernel_vmap_range(iclog->ic_data, count); @@ -1959,6 +1954,12 @@ xlog_write_iclog( } submit_bio(&iclog->ic_bio); + return; +shutdown: + xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); +sync: + xlog_state_done_syncing(iclog); + up(&iclog->ic_sema); } /* From f63a5b3769ad7659da4c0420751d78958ab97675 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Wed, 1 Nov 2023 09:41:45 -0700 Subject: [PATCH 111/299] xfs: fix internal error from AGFL exhaustion We've been seeing XFS errors like the following: XFS: Internal error i != 1 at line 3526 of file fs/xfs/libxfs/xfs_btree.c. Caller xfs_btree_insert+0x1ec/0x280 ... 
Call Trace: xfs_corruption_error+0x94/0xa0 xfs_btree_insert+0x221/0x280 xfs_alloc_fixup_trees+0x104/0x3e0 xfs_alloc_ag_vextent_size+0x667/0x820 xfs_alloc_fix_freelist+0x5d9/0x750 xfs_free_extent_fix_freelist+0x65/0xa0 __xfs_free_extent+0x57/0x180 ... This is the XFS_IS_CORRUPT() check in xfs_btree_insert() when xfs_btree_insrec() fails. After converting this into a panic and dissecting the core dump, I found that xfs_btree_insrec() is failing because it's trying to split a leaf node in the cntbt when the AG free list is empty. In particular, it's failing to get a block from the AGFL _while trying to refill the AGFL_. If a single operation splits every level of the bnobt and the cntbt (and the rmapbt if it is enabled) at once, the free list will be empty. Then, when the next operation tries to refill the free list, it allocates space. If the allocation does not use a full extent, it will need to insert records for the remaining space in the bnobt and cntbt. And if those new records go in full leaves, the leaves (and potentially more nodes up to the old root) need to be split. Fix it by accounting for the additional splits that may be required to refill the free list in the calculation for the minimum free list size. P.S. As far as I can tell, this bug has existed for a long time -- maybe back to xfs-history commit afdf80ae7405 ("Add XFS_AG_MAXLEVELS macros ...") in April 1994! It requires a very unlucky sequence of events, and in fact we didn't hit it until a particular sparse mmap workload updated from 5.12 to 5.19. But this bug existed in 5.12, so it must've been exposed by some other change in allocation or writeback patterns. It's also much less likely to be hit with the rmapbt enabled, since that increases the minimum free list size and is unlikely to split at the same time as the bnobt and cntbt. Reviewed-by: "Darrick J. Wong" Reviewed-by: Dave Chinner Signed-off-by: Omar Sandoval Signed-off-by: Chandan Babu R --- fs/xfs/libxfs/xfs_alloc.c | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 3069194527dd..100ab5931b31 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -2275,16 +2275,37 @@ xfs_alloc_min_freelist( ASSERT(mp->m_alloc_maxlevels > 0); + /* + * For a btree shorter than the maximum height, the worst case is that + * every level gets split and a new level is added, then while inserting + * another entry to refill the AGFL, every level under the old root gets + * split again. This is: + * + * (full height split reservation) + (AGFL refill split height) + * = (current height + 1) + (current height - 1) + * = (new height) + (new height - 2) + * = 2 * new height - 2 + * + * For a btree of maximum height, the worst case is that every level + * under the root gets split, then while inserting another entry to + * refill the AGFL, every level under the root gets split again. 
This is + * also: + * + * 2 * (current height - 1) + * = 2 * (new height - 1) + * = 2 * new height - 2 + */ + /* space needed by-bno freespace btree */ min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1, - mp->m_alloc_maxlevels); + mp->m_alloc_maxlevels) * 2 - 2; /* space needed by-size freespace btree */ min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1, - mp->m_alloc_maxlevels); + mp->m_alloc_maxlevels) * 2 - 2; /* space needed reverse mapping used space btree */ if (xfs_has_rmapbt(mp)) min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1, - mp->m_rmap_maxlevels); + mp->m_rmap_maxlevels) * 2 - 2; return min_free; } From a2e4388adfa44684c7c428a5a5980efe0d75e13e Mon Sep 17 00:00:00 2001 From: Anthony Iliopoulos Date: Sun, 5 Nov 2023 20:23:18 +0100 Subject: [PATCH 112/299] xfs: fix again select in kconfig XFS_ONLINE_SCRUB_STATS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 57c0f4a8ea3a attempted to fix the select in the kconfig entry XFS_ONLINE_SCRUB_STATS by selecting XFS_DEBUG, but the original intention was to select DEBUG_FS, since the feature relies on debugfs to export the related scrub statistics. Fixes: 57c0f4a8ea3a ("xfs: fix select in config XFS_ONLINE_SCRUB_STATS") Reported-by: Holger Hoffstätte Signed-off-by: Anthony Iliopoulos Reviewed-by: Dave Chinner Reviewed-by: "Darrick J. Wong" Signed-off-by: Chandan Babu R --- fs/xfs/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig index ed0bc8cbc703..567fb37274d3 100644 --- a/fs/xfs/Kconfig +++ b/fs/xfs/Kconfig @@ -147,7 +147,7 @@ config XFS_ONLINE_SCRUB_STATS bool "XFS online metadata check usage data collection" default y depends on XFS_ONLINE_SCRUB - select XFS_DEBUG + select DEBUG_FS help If you say Y here, the kernel will gather usage data about the online metadata check subsystem. This includes the number From 038ca189c0d2c1570b4d922f25b524007c85cf94 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 10 Nov 2023 15:33:13 +1100 Subject: [PATCH 113/299] xfs: inode recovery does not validate the recovered inode Discovered when trying to track down a weird recovery corruption issue that wasn't detected at recovery time. The specific corruption was a zero extent count field when big extent counts are in use, and it turns out the dinode verifier doesn't detect that specific corruption case, either. So fix it too. Signed-off-by: Dave Chinner Reviewed-by: "Darrick J. 
Wong" Signed-off-by: Chandan Babu R --- fs/xfs/libxfs/xfs_inode_buf.c | 3 +++ fs/xfs/xfs_inode_item_recover.c | 14 +++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index 543f3748c2a3..137a65bda95d 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -510,6 +510,9 @@ xfs_dinode_verify( if (mode && nextents + naextents > nblocks) return __this_address; + if (nextents + naextents == 0 && nblocks != 0) + return __this_address; + if (S_ISDIR(mode) && nextents > mp->m_dir_geo->max_extents) return __this_address; diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c index 0e5dba2343ea..1c5ba2a732b0 100644 --- a/fs/xfs/xfs_inode_item_recover.c +++ b/fs/xfs/xfs_inode_item_recover.c @@ -286,6 +286,7 @@ xlog_recover_inode_commit_pass2( struct xfs_log_dinode *ldip; uint isize; int need_free = 0; + xfs_failaddr_t fa; if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) { in_f = item->ri_buf[0].i_addr; @@ -528,8 +529,19 @@ out_owner_change: (dip->di_mode != 0)) error = xfs_recover_inode_owner_change(mp, dip, in_f, buffer_list); - /* re-generate the checksum. */ + /* re-generate the checksum and validate the recovered inode. */ xfs_dinode_calc_crc(log->l_mp, dip); + fa = xfs_dinode_verify(log->l_mp, in_f->ilf_ino, dip); + if (fa) { + XFS_CORRUPTION_ERROR( + "Bad dinode after recovery", + XFS_ERRLEVEL_LOW, mp, dip, sizeof(*dip)); + xfs_alert(mp, + "Metadata corruption detected at %pS, inode 0x%llx", + fa, in_f->ilf_ino); + error = -EFSCORRUPTED; + goto out_release; + } ASSERT(bp->b_mount == mp); bp->b_flags |= _XBF_LOGRECOVERY; From 7930d9e103700cde15833638855b750715c12091 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 10 Nov 2023 15:33:14 +1100 Subject: [PATCH 114/299] xfs: recovery should not clear di_flushiter unconditionally Because on v3 inodes, di_flushiter doesn't exist. It overlaps with zero padding in the inode, except when NREXT64=1 configurations are in use and the zero padding is no longer padding but holds the 64 bit extent counter. This manifests obviously on big endian platforms (e.g. s390) because the log dinode is in host order and the overlap is the LSBs of the extent count field. It is not noticed on little endian machines because the overlap is at the MSB end of the extent count field and we need to get more than 2^^48 extents in the inode before it manifests. i.e. the heat death of the universe will occur before we see the problem in little endian machines. This is a zero-day issue for NREXT64=1 configuraitons on big endian machines. Fix it by only clearing di_flushiter on v2 inodes during recovery. Fixes: 9b7d16e34bbe ("xfs: Introduce XFS_DIFLAG2_NREXT64 and associated helpers") cc: stable@kernel.org # 5.19+ Signed-off-by: Dave Chinner Reviewed-by: "Darrick J. 
Wong" Signed-off-by: Chandan Babu R --- fs/xfs/xfs_inode_item_recover.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c index 1c5ba2a732b0..144198a6b270 100644 --- a/fs/xfs/xfs_inode_item_recover.c +++ b/fs/xfs/xfs_inode_item_recover.c @@ -370,24 +370,26 @@ xlog_recover_inode_commit_pass2( * superblock flag to determine whether we need to look at di_flushiter * to skip replay when the on disk inode is newer than the log one */ - if (!xfs_has_v3inodes(mp) && - ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) { - /* - * Deal with the wrap case, DI_MAX_FLUSH is less - * than smaller numbers - */ - if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH && - ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) { - /* do nothing */ - } else { - trace_xfs_log_recover_inode_skip(log, in_f); - error = 0; - goto out_release; + if (!xfs_has_v3inodes(mp)) { + if (ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) { + /* + * Deal with the wrap case, DI_MAX_FLUSH is less + * than smaller numbers + */ + if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH && + ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) { + /* do nothing */ + } else { + trace_xfs_log_recover_inode_skip(log, in_f); + error = 0; + goto out_release; + } } + + /* Take the opportunity to reset the flush iteration count */ + ldip->di_flushiter = 0; } - /* Take the opportunity to reset the flush iteration count */ - ldip->di_flushiter = 0; if (unlikely(S_ISREG(ldip->di_mode))) { if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) && From f726eaa787e9f9bc858c902d18a09af6bcbfcdaf Mon Sep 17 00:00:00 2001 From: Jan Bottorff Date: Thu, 9 Nov 2023 03:19:27 +0000 Subject: [PATCH 115/299] i2c: designware: Fix corrupted memory seen in the ISR When running on a many core ARM64 server, errors were happening in the ISR that looked like corrupted memory. These corruptions would fix themselves if small delays were inserted in the ISR. Errors reported by the driver included "i2c_designware APMC0D0F:00: i2c_dw_xfer_msg: invalid target address" and "i2c_designware APMC0D0F:00:controller timed out" during in-band IPMI SSIF stress tests. The problem was determined to be memory writes in the driver were not becoming visible to all cores when execution rapidly shifted between cores, like when a register write immediately triggers an ISR. Processors with weak memory ordering, like ARM64, make no guarantees about the order normal memory writes become globally visible, unless barrier instructions are used to control ordering. To solve this, regmap accessor functions configured by this driver were changed to use non-relaxed forms of the low-level register access functions, which include a barrier on platforms that require it. This assures memory writes before a controller register access are visible to all cores. The community concluded defaulting to correct operation outweighed defaulting to the small performance gains from using relaxed access functions. Being a low speed device added weight to this choice of default register access behavior. 
Signed-off-by: Jan Bottorff Acked-by: Jarkko Nikula Tested-by: Serge Semin Reviewed-by: Serge Semin Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-common.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index affcfb243f0f..35f762872b8a 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -63,7 +63,7 @@ static int dw_reg_read(void *context, unsigned int reg, unsigned int *val) { struct dw_i2c_dev *dev = context; - *val = readl_relaxed(dev->base + reg); + *val = readl(dev->base + reg); return 0; } @@ -72,7 +72,7 @@ static int dw_reg_write(void *context, unsigned int reg, unsigned int val) { struct dw_i2c_dev *dev = context; - writel_relaxed(val, dev->base + reg); + writel(val, dev->base + reg); return 0; } @@ -81,7 +81,7 @@ static int dw_reg_read_swab(void *context, unsigned int reg, unsigned int *val) { struct dw_i2c_dev *dev = context; - *val = swab32(readl_relaxed(dev->base + reg)); + *val = swab32(readl(dev->base + reg)); return 0; } @@ -90,7 +90,7 @@ static int dw_reg_write_swab(void *context, unsigned int reg, unsigned int val) { struct dw_i2c_dev *dev = context; - writel_relaxed(swab32(val), dev->base + reg); + writel(swab32(val), dev->base + reg); return 0; } @@ -99,8 +99,8 @@ static int dw_reg_read_word(void *context, unsigned int reg, unsigned int *val) { struct dw_i2c_dev *dev = context; - *val = readw_relaxed(dev->base + reg) | - (readw_relaxed(dev->base + reg + 2) << 16); + *val = readw(dev->base + reg) | + (readw(dev->base + reg + 2) << 16); return 0; } @@ -109,8 +109,8 @@ static int dw_reg_write_word(void *context, unsigned int reg, unsigned int val) { struct dw_i2c_dev *dev = context; - writew_relaxed(val, dev->base + reg); - writew_relaxed(val >> 16, dev->base + reg + 2); + writew(val, dev->base + reg); + writew(val >> 16, dev->base + reg + 2); return 0; } From b0077e269f6c152e807fdac90b58caf012cdbaab Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 13 Nov 2023 11:52:31 +0800 Subject: [PATCH 116/299] blk-mq: make sure active queue usage is held for bio_integrity_prep() blk_integrity_unregister() can come if queue usage counter isn't held for one bio with integrity prepared, so this request may be completed with calling profile->complete_fn, then kernel panic. Another constraint is that bio_integrity_prep() needs to be called before bio merge. 
Fix the issue by: - call bio_integrity_prep() with one queue usage counter grabbed reliably - call bio_integrity_prep() before bio merge Fixes: 900e080752025f00 ("block: move queue enter logic into blk_mq_submit_bio()") Reported-by: Yi Zhang Cc: Christoph Hellwig Signed-off-by: Ming Lei Tested-by: Yi Zhang Link: https://lore.kernel.org/r/20231113035231.2708053-1-ming.lei@redhat.com Signed-off-by: Jens Axboe --- block/blk-mq.c | 75 +++++++++++++++++++++++++------------------------- 1 file changed, 38 insertions(+), 37 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index e2d11183f62e..900c1be1fee1 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2858,11 +2858,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q, }; struct request *rq; - if (unlikely(bio_queue_enter(bio))) - return NULL; - if (blk_mq_attempt_bio_merge(q, bio, nsegs)) - goto queue_exit; + return NULL; rq_qos_throttle(q, bio); @@ -2878,35 +2875,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q, rq_qos_cleanup(q, bio); if (bio->bi_opf & REQ_NOWAIT) bio_wouldblock_error(bio); -queue_exit: - blk_queue_exit(q); return NULL; } -static inline struct request *blk_mq_get_cached_request(struct request_queue *q, - struct blk_plug *plug, struct bio **bio, unsigned int nsegs) +/* return true if this @rq can be used for @bio */ +static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug, + struct bio *bio) { - struct request *rq; - enum hctx_type type, hctx_type; + enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf); + enum hctx_type hctx_type = rq->mq_hctx->type; - if (!plug) - return NULL; - rq = rq_list_peek(&plug->cached_rq); - if (!rq || rq->q != q) - return NULL; + WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq); - if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) { - *bio = NULL; - return NULL; - } - - type = blk_mq_get_hctx_type((*bio)->bi_opf); - hctx_type = rq->mq_hctx->type; if (type != hctx_type && !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT)) - return NULL; - if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf)) - return NULL; + return false; + if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf)) + return false; /* * If any qos ->throttle() end up blocking, we will have flushed the @@ -2914,12 +2899,12 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q, * before we throttle. 
*/ plug->cached_rq = rq_list_next(rq); - rq_qos_throttle(q, *bio); + rq_qos_throttle(rq->q, bio); blk_mq_rq_time_init(rq, 0); - rq->cmd_flags = (*bio)->bi_opf; + rq->cmd_flags = bio->bi_opf; INIT_LIST_HEAD(&rq->queuelist); - return rq; + return true; } static void bio_set_ioprio(struct bio *bio) @@ -2949,7 +2934,7 @@ void blk_mq_submit_bio(struct bio *bio) struct blk_plug *plug = blk_mq_plug(bio); const int is_sync = op_is_sync(bio->bi_opf); struct blk_mq_hw_ctx *hctx; - struct request *rq; + struct request *rq = NULL; unsigned int nr_segs = 1; blk_status_t ret; @@ -2960,20 +2945,36 @@ void blk_mq_submit_bio(struct bio *bio) return; } - if (!bio_integrity_prep(bio)) - return; - bio_set_ioprio(bio); - rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs); - if (!rq) { - if (!bio) + if (plug) { + rq = rq_list_peek(&plug->cached_rq); + if (rq && rq->q != q) + rq = NULL; + } + if (rq) { + if (!bio_integrity_prep(bio)) return; - rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); - if (unlikely(!rq)) + if (blk_mq_attempt_bio_merge(q, bio, nr_segs)) return; + if (blk_mq_can_use_cached_rq(rq, plug, bio)) + goto done; + percpu_ref_get(&q->q_usage_counter); + } else { + if (unlikely(bio_queue_enter(bio))) + return; + if (!bio_integrity_prep(bio)) + goto fail; } + rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); + if (unlikely(!rq)) { +fail: + blk_queue_exit(q); + return; + } + +done: trace_block_getrq(bio); rq_qos_track(q, rq, bio); From d02ef87db9d6137fc2a98231b92f24ead4f7966d Mon Sep 17 00:00:00 2001 From: Simon Trimmer Date: Mon, 13 Nov 2023 16:40:29 +0000 Subject: [PATCH 117/299] ALSA: hda: cs35l56: Enable low-power hibernation mode on i2c This can now be re-enabled as the sequence to reliably wake the device has been implemented in the shared ASoC code. This has a functional dependency on commit 3df761bdbc8b ("ASoC: cs35l56: Wake transactions need to be issued twice") To protect against this, enabling hibernation is conditional on CS35L56_WAKE_HOLD_TIME_US being defined, which indicates that the new hibernation sequences are available. Signed-off-by: Simon Trimmer Signed-off-by: Richard Fitzgerald Link: https://lore.kernel.org/r/20231113164029.1156669-1-rf@opensource.cirrus.com Signed-off-by: Takashi Iwai --- sound/pci/hda/cs35l56_hda_i2c.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sound/pci/hda/cs35l56_hda_i2c.c b/sound/pci/hda/cs35l56_hda_i2c.c index 757a4d193e0f..a9ef6d86de83 100644 --- a/sound/pci/hda/cs35l56_hda_i2c.c +++ b/sound/pci/hda/cs35l56_hda_i2c.c @@ -21,6 +21,10 @@ static int cs35l56_hda_i2c_probe(struct i2c_client *clt) return -ENOMEM; cs35l56->base.dev = &clt->dev; + +#ifdef CS35L56_WAKE_HOLD_TIME_US + cs35l56->base.can_hibernate = true; +#endif cs35l56->base.regmap = devm_regmap_init_i2c(clt, &cs35l56_regmap_i2c); if (IS_ERR(cs35l56->base.regmap)) { ret = PTR_ERR(cs35l56->base.regmap); From 382561d16854a747e6df71034da08d20d6013dfe Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Sun, 12 Nov 2023 18:32:45 -0800 Subject: [PATCH 118/299] i2c: ocores: Move system PM hooks to the NOIRQ phase When an I2C device contains a wake IRQ subordinate to a regmap-irq chip, the regmap-irq code must be able to perform I2C transactions during suspend_device_irqs() and resume_device_irqs(). Therefore, the bus must be suspended/resumed during the NOIRQ phase. 
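The pattern being switched to can be sketched as follows; the names are placeholders, but DEFINE_NOIRQ_DEV_PM_OPS() is the generic helper that routes system sleep callbacks to the noirq phase, when device interrupts are disabled:

#include <linux/pm.h>
#include <linux/platform_device.h>

static int demo_suspend_noirq(struct device *dev)
{
	/* runs after suspend_device_irqs(): device IRQs already disabled */
	return 0;
}

static int demo_resume_noirq(struct device *dev)
{
	/* runs before device IRQs are re-enabled on resume */
	return 0;
}

static DEFINE_NOIRQ_DEV_PM_OPS(demo_pm_ops, demo_suspend_noirq, demo_resume_noirq);

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		.pm = pm_sleep_ptr(&demo_pm_ops),
	},
};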
Signed-off-by: Samuel Holland Acked-by: Peter Korsgaard Reviewed-by: Andi Shyti Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-ocores.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index 041a76f71a49..e106af83cef4 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -771,8 +771,8 @@ static int ocores_i2c_resume(struct device *dev) return ocores_init(dev, i2c); } -static DEFINE_SIMPLE_DEV_PM_OPS(ocores_i2c_pm, - ocores_i2c_suspend, ocores_i2c_resume); +static DEFINE_NOIRQ_DEV_PM_OPS(ocores_i2c_pm, + ocores_i2c_suspend, ocores_i2c_resume); static struct platform_driver ocores_i2c_driver = { .probe = ocores_i2c_probe, From ff31ba19d732efb9aca3633935d71085e68d5076 Mon Sep 17 00:00:00 2001 From: Anastasia Belova Date: Mon, 13 Nov 2023 17:52:32 +0300 Subject: [PATCH 119/299] cifs: spnego: add ';' in HOST_KEY_LEN "host=" should start with ';' (as in cifs_get_spnego_key) So its length should be 6. Found by Linux Verification Center (linuxtesting.org) with SVACE. Reviewed-by: Paulo Alcantara (SUSE) Fixes: 7c9c3760b3a5 ("[CIFS] add constants for string lengths of keynames in SPNEGO upcall string") Signed-off-by: Anastasia Belova Co-developed-by: Ekaterina Esina Signed-off-by: Ekaterina Esina Signed-off-by: Steve French --- fs/smb/client/cifs_spnego.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c index 6f3285f1dfee..af7849e5974f 100644 --- a/fs/smb/client/cifs_spnego.c +++ b/fs/smb/client/cifs_spnego.c @@ -64,8 +64,8 @@ struct key_type cifs_spnego_key_type = { * strlen(";sec=ntlmsspi") */ #define MAX_MECH_STR_LEN 13 -/* strlen of "host=" */ -#define HOST_KEY_LEN 5 +/* strlen of ";host=" */ +#define HOST_KEY_LEN 6 /* strlen of ";ip4=" or ";ip6=" */ #define IP_KEY_LEN 5 From 181724fc72486dec2bec8803459be05b5162aaa8 Mon Sep 17 00:00:00 2001 From: Ekaterina Esina Date: Mon, 13 Nov 2023 19:42:41 +0300 Subject: [PATCH 120/299] cifs: fix check of rc in function generate_smb3signingkey Remove extra check after condition, add check after generating key for encryption. The check is needed to return non zero rc before rewriting it with generating key for decryption. Found by Linux Verification Center (linuxtesting.org) with SVACE. 
Reviewed-by: Paulo Alcantara (SUSE) Fixes: d70e9fa55884 ("cifs: try opening channels after mounting") Signed-off-by: Ekaterina Esina Co-developed-by: Anastasia Belova Signed-off-by: Anastasia Belova Signed-off-by: Steve French --- fs/smb/client/smb2transport.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c index 84ea67301303..5a3ca62d2f07 100644 --- a/fs/smb/client/smb2transport.c +++ b/fs/smb/client/smb2transport.c @@ -458,6 +458,8 @@ generate_smb3signingkey(struct cifs_ses *ses, ptriplet->encryption.context, ses->smb3encryptionkey, SMB3_ENC_DEC_KEY_SIZE); + if (rc) + return rc; rc = generate_key(ses, ptriplet->decryption.label, ptriplet->decryption.context, ses->smb3decryptionkey, @@ -466,9 +468,6 @@ generate_smb3signingkey(struct cifs_ses *ses, return rc; } - if (rc) - return rc; - #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS cifs_dbg(VFS, "%s: dumping generated AES session keys\n", __func__); /* From 48d584b7f90f48d2623fb01743b099efbf0d36c2 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Mon, 6 Nov 2023 10:34:01 +0800 Subject: [PATCH 121/299] bcachefs: make bch2_target_to_text_sb static MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The bch2_target_to_text_sb are not used outside the file disk_groups.c, so the modification is defined as static. fs/bcachefs/disk_groups.c:583:6: warning: no previous prototype for ‘bch2_target_to_text_sb’. Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=7144 Signed-off-by: Jiapeng Chong Signed-off-by: Kent Overstreet --- fs/bcachefs/disk_groups.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/bcachefs/disk_groups.c b/fs/bcachefs/disk_groups.c index d613695abf9f..1f334124055b 100644 --- a/fs/bcachefs/disk_groups.c +++ b/fs/bcachefs/disk_groups.c @@ -580,7 +580,7 @@ void bch2_target_to_text(struct printbuf *out, struct bch_fs *c, unsigned v) } } -void bch2_target_to_text_sb(struct printbuf *out, struct bch_sb *sb, unsigned v) +static void bch2_target_to_text_sb(struct printbuf *out, struct bch_sb *sb, unsigned v) { struct target t = target_decode(v); From c4f1f80a0e8d829ce4e29ca52cb7f74b22f67454 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 11 Nov 2023 12:30:19 -0500 Subject: [PATCH 122/299] bcachefs: Use correct fgf_t type as function argument This quiets a sparse complaint. Signed-off-by: Kent Overstreet --- fs/bcachefs/fs-io-pagecache.c | 2 +- fs/bcachefs/fs-io-pagecache.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/bcachefs/fs-io-pagecache.c b/fs/bcachefs/fs-io-pagecache.c index 8bd9bcdd27f7..ff664fd0d8ef 100644 --- a/fs/bcachefs/fs-io-pagecache.c +++ b/fs/bcachefs/fs-io-pagecache.c @@ -13,7 +13,7 @@ int bch2_filemap_get_contig_folios_d(struct address_space *mapping, loff_t start, u64 end, - int fgp_flags, gfp_t gfp, + fgf_t fgp_flags, gfp_t gfp, folios *fs) { struct folio *f; diff --git a/fs/bcachefs/fs-io-pagecache.h b/fs/bcachefs/fs-io-pagecache.h index a2222ad586e9..27f712ae37a6 100644 --- a/fs/bcachefs/fs-io-pagecache.h +++ b/fs/bcachefs/fs-io-pagecache.h @@ -7,7 +7,7 @@ typedef DARRAY(struct folio *) folios; int bch2_filemap_get_contig_folios_d(struct address_space *, loff_t, - u64, int, gfp_t, folios *); + u64, fgf_t, gfp_t, folios *); int bch2_write_invalidate_inode_pages_range(struct address_space *, loff_t, loff_t); /* From 1b8bc556280d3f4970407480e6a5ff49efe5601b Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Mon, 6 Nov 2023 16:27:02 -0600 Subject: [PATCH 123/299] bcachefs: Use DECLARE_FLEX_ARRAY() helper and fix multiple -Warray-bounds warnings Transform zero-length array `s` into a proper flexible-array member in `struct snapshot_table` via the DECLARE_FLEX_ARRAY() helper; and fix tons of the following -Warray-bounds warnings: fs/bcachefs/snapshot.h:36:21: warning: array subscript is outside array bounds of 'struct snapshot_t[0]' [-Warray-bounds=] fs/bcachefs/snapshot.h:36:21: warning: array subscript is outside array bounds of 'struct snapshot_t[0]' [-Warray-bounds=] fs/bcachefs/snapshot.c:135:70: warning: array subscript is outside array bounds of 'struct snapshot_t[0]' [-Warray-bounds=] fs/bcachefs/snapshot.h:36:21: warning: array subscript is outside array bounds of 'struct snapshot_t[0]' [-Warray-bounds=] fs/bcachefs/snapshot.h:36:21: warning: array subscript is outside array bounds of 'struct snapshot_t[0]' [-Warray-bounds=] fs/bcachefs/snapshot.h:36:21: warning: array subscript is outside array bounds of 'struct snapshot_t[0]' [-Warray-bounds=] This helps with the ongoing efforts to globally enable -Warray-bounds. Signed-off-by: Gustavo A. R. Silva Signed-off-by: Kent Overstreet --- fs/bcachefs/subvolume_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/bcachefs/subvolume_types.h b/fs/bcachefs/subvolume_types.h index 86833445af20..2d2e66a4e468 100644 --- a/fs/bcachefs/subvolume_types.h +++ b/fs/bcachefs/subvolume_types.h @@ -20,7 +20,7 @@ struct snapshot_t { }; struct snapshot_table { - struct snapshot_t s[0]; + DECLARE_FLEX_ARRAY(struct snapshot_t, s); }; typedef struct { From 274c2f8fd27158d15524abe63c3df6fb96707dd3 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 6 Nov 2023 15:40:22 -0600 Subject: [PATCH 124/299] bcachefs: Fix multiple -Warray-bounds warnings Transform zero-length array `entries` into a proper flexible-array member in `struct journal_seq_blacklist_table`; and fix the following -Warray-bounds warnings: fs/bcachefs/journal_seq_blacklist.c:148:26: warning: array subscript idx is outside array bounds of 'struct journal_seq_blacklist_table_entry[0]' [-Warray-bounds=] fs/bcachefs/journal_seq_blacklist.c:150:30: warning: array subscript idx is outside array bounds of 'struct journal_seq_blacklist_table_entry[0]' [-Warray-bounds=] fs/bcachefs/journal_seq_blacklist.c:154:27: warning: array subscript idx is outside array bounds of 'struct journal_seq_blacklist_table_entry[0]' [-Warray-bounds=] fs/bcachefs/journal_seq_blacklist.c:176:27: warning: array subscript i is outside array bounds of 'struct journal_seq_blacklist_table_entry[0]' [-Warray-bounds=] fs/bcachefs/journal_seq_blacklist.c:177:27: warning: array subscript i is outside array bounds of 'struct journal_seq_blacklist_table_entry[0]' [-Warray-bounds=] fs/bcachefs/journal_seq_blacklist.c:297:34: warning: array subscript i is outside array bounds of 'struct journal_seq_blacklist_table_entry[0]' [-Warray-bounds=] fs/bcachefs/journal_seq_blacklist.c:298:34: warning: array subscript i is outside array bounds of 'struct journal_seq_blacklist_table_entry[0]' [-Warray-bounds=] fs/bcachefs/journal_seq_blacklist.c:300:31: warning: array subscript i is outside array bounds of 'struct journal_seq_blacklist_table_entry[0]' [-Warray-bounds=] This results in no differences in binary output. This helps with the ongoing efforts to globally enable -Warray-bounds. Signed-off-by: Gustavo A. R. 
Silva Signed-off-by: Kent Overstreet --- fs/bcachefs/bcachefs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h index 9cb8684959ee..403aa3389fcc 100644 --- a/fs/bcachefs/bcachefs.h +++ b/fs/bcachefs/bcachefs.h @@ -617,7 +617,7 @@ struct journal_seq_blacklist_table { u64 start; u64 end; bool dirty; - } entries[0]; + } entries[]; }; struct journal_keys { From 03cc1e67a243cbb2c85d5fd84f369449f94d4269 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Tue, 7 Nov 2023 10:30:22 -0500 Subject: [PATCH 125/299] bcachefs: Fix null ptr deref in bch2_backpointer_get_node() bch2_btree_iter_peek_node() can return a NULL ptr (when the tree is shorter than the search depth); handle this with an early return. Signed-off-by: Kent Overstreet Reported-by: Dan Carpenter Fixes: https://lore.kernel.org/linux-bcachefs/5fc3c28b-c232-4ec7-b0ac-4ef220ddf976@moroto.mountain/T/ Signed-off-by: Kent Overstreet --- fs/bcachefs/backpointers.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c index ef02c9bb0354..23c0834a97a4 100644 --- a/fs/bcachefs/backpointers.c +++ b/fs/bcachefs/backpointers.c @@ -313,17 +313,17 @@ struct btree *bch2_backpointer_get_node(struct btree_trans *trans, bp.level - 1, 0); b = bch2_btree_iter_peek_node(iter); - if (IS_ERR(b)) + if (IS_ERR_OR_NULL(b)) goto err; BUG_ON(b->c.level != bp.level - 1); - if (b && extent_matches_bp(c, bp.btree_id, bp.level, - bkey_i_to_s_c(&b->key), - bucket, bp)) + if (extent_matches_bp(c, bp.btree_id, bp.level, + bkey_i_to_s_c(&b->key), + bucket, bp)) return b; - if (b && btree_node_will_make_reachable(b)) { + if (btree_node_will_make_reachable(b)) { b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node); } else { backpointer_not_found(trans, bp_pos, bp, bkey_i_to_s_c(&b->key)); From 4d6128dca6d940015fe2aa383ec1a0eeb9632f08 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Mon, 6 Nov 2023 11:59:05 -0500 Subject: [PATCH 126/299] bcachefs: Guard against insufficient devices to create stripes We can't create stripes if we don't have enough devices - this manifested as an integer underflow bug later. 
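As an aside, the underflow hazard is easy to reproduce in a few lines of plain C. The names below are hypothetical and the arithmetic is simplified from what bcachefs actually does, but it shows why the guard must run before any unsigned math on the device count:

    #include <stdio.h>

    int main(void)
    {
        unsigned int nr_active_devs = 1;  /* too few devices for a stripe */
        unsigned int redundancy = 2;      /* parity blocks wanted */

        /* The guard the patch adds: with fewer than redundancy + 2
         * devices, bail out (or fall back to plain replication). */
        if (nr_active_devs < redundancy + 2) {
            printf("insufficient devices (have %u, need %u)\n",
                   nr_active_devs, redundancy + 2);
            return 0;
        }

        /* Without the guard this subtraction wraps to a huge value,
         * because both operands are unsigned. */
        unsigned int nr_data = nr_active_devs - redundancy;
        printf("data blocks per stripe: %u\n", nr_data);
        return 0;
    }
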
Signed-off-by: Kent Overstreet --- fs/bcachefs/ec.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c index 875f7c5a6fca..2a77de18c004 100644 --- a/fs/bcachefs/ec.c +++ b/fs/bcachefs/ec.c @@ -1373,6 +1373,15 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target, h->nr_active_devs++; rcu_read_unlock(); + + /* + * If we only have redundancy + 1 devices, we're better off with just + * replication: + */ + if (h->nr_active_devs < h->redundancy + 2) + bch_err(c, "insufficient devices available to create stripe (have %u, need %u) - mismatched bucket sizes?", + h->nr_active_devs, h->redundancy + 2); + list_add(&h->list, &c->ec_stripe_head_list); return h; } @@ -1424,6 +1433,11 @@ __bch2_ec_stripe_head_get(struct btree_trans *trans, h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark); found: + if (!IS_ERR_OR_NULL(h) && + h->nr_active_devs < h->redundancy + 2) { + mutex_unlock(&h->lock); + h = NULL; + } mutex_unlock(&c->ec_stripe_head_lock); return h; } @@ -1681,8 +1695,6 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans, int ret; h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark); - if (!h) - bch_err(c, "no stripe head"); if (IS_ERR_OR_NULL(h)) return h; From 1bd5bcc9f5eef968ed021d72b14a157be7abdb49 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Mon, 13 Nov 2023 21:44:14 -0500 Subject: [PATCH 127/299] bcachefs: Split out btree_key_cache_types.h More consistent organization. Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_key_cache_types.h | 30 +++++++++++++++++++++++++++++ fs/bcachefs/btree_types.h | 27 +------------------------- 2 files changed, 31 insertions(+), 26 deletions(-) create mode 100644 fs/bcachefs/btree_key_cache_types.h diff --git a/fs/bcachefs/btree_key_cache_types.h b/fs/bcachefs/btree_key_cache_types.h new file mode 100644 index 000000000000..0f967808d766 --- /dev/null +++ b/fs/bcachefs/btree_key_cache_types.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BCACHEFS_BTREE_KEY_CACHE_TYPES_H +#define _BCACHEFS_BTREE_KEY_CACHE_TYPES_H + +struct btree_key_cache_freelist { + struct bkey_cached *objs[16]; + unsigned nr; +}; + +struct btree_key_cache { + struct mutex lock; + struct rhashtable table; + bool table_init_done; + struct list_head freed_pcpu; + struct list_head freed_nonpcpu; + struct shrinker *shrink; + unsigned shrink_iter; + struct btree_key_cache_freelist __percpu *pcpu_freed; + + atomic_long_t nr_freed; + atomic_long_t nr_keys; + atomic_long_t nr_dirty; +}; + +struct bkey_cached_key { + u32 btree_id; + struct bpos pos; +} __packed __aligned(4); + +#endif /* _BCACHEFS_BTREE_KEY_CACHE_TYPES_H */ diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index 941841a0c5bf..be5d6027e796 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -5,7 +5,7 @@ #include #include -//#include "bkey_methods.h" +#include "btree_key_cache_types.h" #include "buckets_types.h" #include "darray.h" #include "errcode.h" @@ -312,31 +312,6 @@ struct btree_iter { #endif }; -struct btree_key_cache_freelist { - struct bkey_cached *objs[16]; - unsigned nr; -}; - -struct btree_key_cache { - struct mutex lock; - struct rhashtable table; - bool table_init_done; - struct list_head freed_pcpu; - struct list_head freed_nonpcpu; - struct shrinker *shrink; - unsigned shrink_iter; - struct btree_key_cache_freelist __percpu *pcpu_freed; - - atomic_long_t nr_freed; - atomic_long_t nr_keys; - atomic_long_t 
nr_dirty; -}; - -struct bkey_cached_key { - u32 btree_id; - struct bpos pos; -} __packed __aligned(4); - #define BKEY_CACHED_ACCESSED 0 #define BKEY_CACHED_DIRTY 1 From c65c13f0eac61218c9ee4635c05661c0b9760e58 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Mon, 6 Nov 2023 09:53:14 -0500 Subject: [PATCH 128/299] bcachefs: Run btree key cache shrinker less aggressively The btree key cache maintains lists of items that have been freed, but can't yet be reclaimed because a bch2_trans_relock() call might find them - we're waiting for SRCU readers to release. Previously, we wouldn't count these items against the number we're attempting to scan for, which would mean we'd evict more live key cache entries - doing quite a bit of potentially unecessary work. With recent work to make sure we don't hold SRCU locks for too long, it should be safe to count all the items on the freelists against number to scan - even if we can't reclaim them yet, we will be able to soon. Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_key_cache.c | 23 +++++++++++++++++++---- fs/bcachefs/btree_key_cache_types.h | 4 ++++ 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index 9b78f78a75b5..b3305a04d808 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -89,10 +89,13 @@ static void bkey_cached_free(struct btree_key_cache *bc, ck->btree_trans_barrier_seq = start_poll_synchronize_srcu(&c->btree_trans_barrier); - if (ck->c.lock.readers) + if (ck->c.lock.readers) { list_move_tail(&ck->list, &bc->freed_pcpu); - else + bc->nr_freed_pcpu++; + } else { list_move_tail(&ck->list, &bc->freed_nonpcpu); + bc->nr_freed_nonpcpu++; + } atomic_long_inc(&bc->nr_freed); kfree(ck->k); @@ -109,6 +112,8 @@ static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc, { struct bkey_cached *pos; + bc->nr_freed_nonpcpu++; + list_for_each_entry_reverse(pos, &bc->freed_nonpcpu, list) { if (ULONG_CMP_GE(ck->btree_trans_barrier_seq, pos->btree_trans_barrier_seq)) { @@ -158,6 +163,7 @@ static void bkey_cached_move_to_freelist(struct btree_key_cache *bc, #else mutex_lock(&bc->lock); list_move_tail(&ck->list, &bc->freed_nonpcpu); + bc->nr_freed_nonpcpu++; mutex_unlock(&bc->lock); #endif } else { @@ -217,6 +223,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, f->nr < ARRAY_SIZE(f->objs) / 2) { ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list); list_del_init(&ck->list); + bc->nr_freed_nonpcpu--; f->objs[f->nr++] = ck; } @@ -229,6 +236,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, if (!list_empty(&bc->freed_nonpcpu)) { ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list); list_del_init(&ck->list); + bc->nr_freed_nonpcpu--; } mutex_unlock(&bc->lock); #endif @@ -850,6 +858,8 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink, * Newest freed entries are at the end of the list - once we hit one * that's too new to be freed, we can bail out: */ + scanned += bc->nr_freed_nonpcpu; + list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) { if (!poll_state_synchronize_srcu(&c->btree_trans_barrier, ck->btree_trans_barrier_seq)) @@ -859,13 +869,15 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink, six_lock_exit(&ck->c.lock); kmem_cache_free(bch2_key_cache, ck); atomic_long_dec(&bc->nr_freed); - scanned++; freed++; + bc->nr_freed_nonpcpu--; } if (scanned >= nr) goto out; + scanned += bc->nr_freed_pcpu; + 
list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) { if (!poll_state_synchronize_srcu(&c->btree_trans_barrier, ck->btree_trans_barrier_seq)) @@ -875,8 +887,8 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink, six_lock_exit(&ck->c.lock); kmem_cache_free(bch2_key_cache, ck); atomic_long_dec(&bc->nr_freed); - scanned++; freed++; + bc->nr_freed_pcpu--; } if (scanned >= nr) @@ -982,6 +994,9 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc) } #endif + BUG_ON(list_count_nodes(&bc->freed_pcpu) != bc->nr_freed_pcpu); + BUG_ON(list_count_nodes(&bc->freed_nonpcpu) != bc->nr_freed_nonpcpu); + list_splice(&bc->freed_pcpu, &items); list_splice(&bc->freed_nonpcpu, &items); diff --git a/fs/bcachefs/btree_key_cache_types.h b/fs/bcachefs/btree_key_cache_types.h index 0f967808d766..290e4e57df5b 100644 --- a/fs/bcachefs/btree_key_cache_types.h +++ b/fs/bcachefs/btree_key_cache_types.h @@ -11,8 +11,12 @@ struct btree_key_cache { struct mutex lock; struct rhashtable table; bool table_init_done; + struct list_head freed_pcpu; + size_t nr_freed_pcpu; struct list_head freed_nonpcpu; + size_t nr_freed_nonpcpu; + struct shrinker *shrink; unsigned shrink_iter; struct btree_key_cache_freelist __percpu *pcpu_freed; From 3b8c4507779691984e31e64e0b80abb03cc02d0d Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Mon, 6 Nov 2023 19:49:47 -0500 Subject: [PATCH 129/299] bcachefs: btree_trans->write_locked As prep work for the next patch to fix a key cache reclaim issue, we need to start tracking whether we're currently holding write locks - so that we can release and retake the before calling into memory reclaim. Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_trans_commit.c | 85 ++++++++++++++++++-------------- fs/bcachefs/btree_types.h | 1 + 2 files changed, 50 insertions(+), 36 deletions(-) diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c index decad7b66c59..02491f7bb831 100644 --- a/fs/bcachefs/btree_trans_commit.c +++ b/fs/bcachefs/btree_trans_commit.c @@ -78,6 +78,53 @@ inline void bch2_btree_node_prep_for_write(struct btree_trans *trans, bch2_btree_init_next(trans, b); } +static noinline int trans_lock_write_fail(struct btree_trans *trans, struct btree_insert_entry *i) +{ + while (--i >= trans->updates) { + if (same_leaf_as_prev(trans, i)) + continue; + + bch2_btree_node_unlock_write(trans, i->path, insert_l(i)->b); + } + + trace_and_count(trans->c, trans_restart_would_deadlock_write, trans); + return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write); +} + +static inline int bch2_trans_lock_write(struct btree_trans *trans) +{ + struct btree_insert_entry *i; + + EBUG_ON(trans->write_locked); + + trans_for_each_update(trans, i) { + if (same_leaf_as_prev(trans, i)) + continue; + + if (bch2_btree_node_lock_write(trans, i->path, &insert_l(i)->b->c)) + return trans_lock_write_fail(trans, i); + + if (!i->cached) + bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b); + } + + trans->write_locked = true; + return 0; +} + +static inline void bch2_trans_unlock_write(struct btree_trans *trans) +{ + if (likely(trans->write_locked)) { + struct btree_insert_entry *i; + + trans_for_each_update(trans, i) + if (!same_leaf_as_prev(trans, i)) + bch2_btree_node_unlock_write_inlined(trans, i->path, + insert_l(i)->b); + trans->write_locked = false; + } +} + /* Inserting into a given leaf node (last stage of insert): */ /* Handle overwrites and do insert, for non extents: */ @@ -732,37 +779,6 @@ revert_fs_usage: return ret; 
} -static noinline int trans_lock_write_fail(struct btree_trans *trans, struct btree_insert_entry *i) -{ - while (--i >= trans->updates) { - if (same_leaf_as_prev(trans, i)) - continue; - - bch2_btree_node_unlock_write(trans, i->path, insert_l(i)->b); - } - - trace_and_count(trans->c, trans_restart_would_deadlock_write, trans); - return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write); -} - -static inline int trans_lock_write(struct btree_trans *trans) -{ - struct btree_insert_entry *i; - - trans_for_each_update(trans, i) { - if (same_leaf_as_prev(trans, i)) - continue; - - if (bch2_btree_node_lock_write(trans, i->path, &insert_l(i)->b->c)) - return trans_lock_write_fail(trans, i); - - if (!i->cached) - bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b); - } - - return 0; -} - static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans) { struct btree_insert_entry *i; @@ -838,7 +854,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags if (unlikely(ret)) return ret; - ret = trans_lock_write(trans); + ret = bch2_trans_lock_write(trans); if (unlikely(ret)) return ret; @@ -847,10 +863,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags if (!ret && unlikely(trans->journal_replay_not_finished)) bch2_drop_overwrites_from_journal(trans); - trans_for_each_update(trans, i) - if (!same_leaf_as_prev(trans, i)) - bch2_btree_node_unlock_write_inlined(trans, i->path, - insert_l(i)->b); + bch2_trans_unlock_write(trans); if (!ret && trans->journal_pin) bch2_journal_pin_add(&c->journal, trans->journal_res.seq, diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index be5d6027e796..f3669fa68591 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -409,6 +409,7 @@ struct btree_trans { bool journal_transaction_names:1; bool journal_replay_not_finished:1; bool notrace_relock_fail:1; + bool write_locked:1; enum bch_errcode restarted:16; u32 restart_count; unsigned long last_begin_ip; From 09b0283ee23a02094a43a9b93146d1060c58fc3a Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sun, 5 Nov 2023 15:28:44 -0500 Subject: [PATCH 130/299] bcachefs: Make sure to drop/retake btree locks before reclaim We really don't want to be invoking memory reclaim with btree locks held: even aside from (solvable, but tricky) recursion issues, it can cause painful to diagnose performance edge cases. This fixes a recently reported issue in btree_key_can_insert_cached(). 
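The shape of the slowpath is worth spelling out. Below is a minimal userspace sketch, with a pthread mutex standing in for the btree locks and malloc() standing in for an allocation that may recurse into reclaim; all names are hypothetical:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static char *buf;          /* hypothetical cached key */
    static size_t buf_len;

    /* Called with the lock held; returns with the lock held.  The
     * potentially-blocking allocation happens only while the lock is
     * dropped, mirroring the slowpath added by this patch. */
    static int grow_buf(size_t new_len)
    {
        pthread_mutex_unlock(&lock);          /* drop locks first */

        char *new_buf = malloc(new_len);      /* may block / "reclaim" */
        if (!new_buf) {
            pthread_mutex_lock(&lock);
            return -1;
        }

        pthread_mutex_lock(&lock);            /* retake locks */
        memcpy(new_buf, buf, buf_len);        /* splice the new buffer in */
        free(buf);
        buf = new_buf;
        buf_len = new_len;
        return 0;
    }

    int main(void)
    {
        pthread_mutex_lock(&lock);
        buf = calloc(1, 16);
        if (!buf)
            return 1;
        buf_len = 16;
        if (grow_buf(64) == 0)
            printf("grew key buffer to %zu bytes\n", buf_len);
        pthread_mutex_unlock(&lock);
        free(buf);
        return 0;
    }

The real kernel path additionally has to cope with a failed relock and repoint any stale old-value references at the new buffer.
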
Signed-off-by: Kent Overstreet Reported-by: Mateusz Guzik Fixes: https://lore.kernel.org/linux-bcachefs/CAGudoHEsb_hGRMeWeXh+UF6po0qQuuq_NKSEo+s1sEb6bDLjpA@mail.gmail.com/T/ --- fs/bcachefs/btree_trans_commit.c | 48 ++++++++++++++++++++++++++++---- 1 file changed, 42 insertions(+), 6 deletions(-) diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c index 02491f7bb831..55a120eb8692 100644 --- a/fs/bcachefs/btree_trans_commit.c +++ b/fs/bcachefs/btree_trans_commit.c @@ -368,6 +368,45 @@ static inline int btree_key_can_insert(struct btree_trans *trans, return 0; } +noinline static int +btree_key_can_insert_cached_slowpath(struct btree_trans *trans, unsigned flags, + struct btree_path *path, unsigned new_u64s) +{ + struct bch_fs *c = trans->c; + struct btree_insert_entry *i; + struct bkey_cached *ck = (void *) path->l[0].b; + struct bkey_i *new_k; + int ret; + + bch2_trans_unlock_write(trans); + bch2_trans_unlock(trans); + + new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL); + if (!new_k) { + bch_err(c, "error allocating memory for key cache key, btree %s u64s %u", + bch2_btree_id_str(path->btree_id), new_u64s); + return -BCH_ERR_ENOMEM_btree_key_cache_insert; + } + + ret = bch2_trans_relock(trans) ?: + bch2_trans_lock_write(trans); + if (unlikely(ret)) { + kfree(new_k); + return ret; + } + + memcpy(new_k, ck->k, ck->u64s * sizeof(u64)); + + trans_for_each_update(trans, i) + if (i->old_v == &ck->k->v) + i->old_v = &new_k->v; + + kfree(ck->k); + ck->u64s = new_u64s; + ck->k = new_k; + return 0; +} + static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags, struct btree_path *path, unsigned u64s) { @@ -394,12 +433,9 @@ static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags return 0; new_u64s = roundup_pow_of_two(u64s); - new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOFS); - if (!new_k) { - bch_err(c, "error allocating memory for key cache key, btree %s u64s %u", - bch2_btree_id_str(path->btree_id), new_u64s); - return -BCH_ERR_ENOMEM_btree_key_cache_insert; - } + new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOWAIT); + if (unlikely(!new_k)) + return btree_key_can_insert_cached_slowpath(trans, flags, path, new_u64s); trans_for_each_update(trans, i) if (i->old_v == &ck->k->v) From 701ff57eb3d7c86c9a53de959e0c48fa8ca446d4 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Fri, 3 Nov 2023 18:38:35 -0400 Subject: [PATCH 131/299] bcachefs: Check for nonce offset inconsistency in data_update path We've rarely been seeing a nonce offset inconsistency that doesn't show up in tests: this adds some extra verification code to the data update path that prints out more relevant info when it occurs. 
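The idea generalizes: validate the object immediately before the point of no return, and on failure dump everything needed to debug it and refuse to proceed. A compressed userspace sketch with hypothetical names (the real bkey validation is far richer):

    #include <stdio.h>

    struct record {            /* stand-in for the key being inserted */
        unsigned int len;
        unsigned int csum;
    };

    static int record_invalid(const struct record *r)
    {
        return r->len == 0;    /* placeholder validity rule */
    }

    static int commit_record(const struct record *r)
    {
        if (record_invalid(r)) {
            /* Print the state we are about to lose, then bail;
             * the kernel path raises a fatal filesystem error. */
            fprintf(stderr, "about to insert invalid record: len=%u csum=%08x\n",
                    r->len, r->csum);
            return -1;
        }
        /* ...actually write it out... */
        return 0;
    }

    int main(void)
    {
        struct record bad = { .len = 0, .csum = 0xdeadbeef };
        return commit_record(&bad) ? 1 : 0;
    }
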
Signed-off-by: Kent Overstreet --- fs/bcachefs/data_update.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c index 0771a6d880bf..5ed66202c226 100644 --- a/fs/bcachefs/data_update.c +++ b/fs/bcachefs/data_update.c @@ -239,6 +239,34 @@ restart_drop_extra_replicas: next_pos = insert->k.p; + /* + * Check for nonce offset inconsistency: + * This is debug code - we've been seeing this bug rarely, and + * it's been hard to reproduce, so this should give us some more + * information when it does occur: + */ + struct printbuf err = PRINTBUF; + int invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), __btree_node_type(0, m->btree_id), 0, &err); + printbuf_exit(&err); + + if (invalid) { + struct printbuf buf = PRINTBUF; + + prt_str(&buf, "about to insert invalid key in data update path"); + prt_str(&buf, "\nold: "); + bch2_bkey_val_to_text(&buf, c, old); + prt_str(&buf, "\nk: "); + bch2_bkey_val_to_text(&buf, c, k); + prt_str(&buf, "\nnew: "); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert)); + + bch2_print_string_as_lines(KERN_ERR, buf.buf); + printbuf_exit(&buf); + + bch2_fatal_error(c); + goto out; + } + ret = bch2_insert_snapshot_whiteouts(trans, m->btree_id, k.k->p, bkey_start_pos(&insert->k)) ?: bch2_insert_snapshot_whiteouts(trans, m->btree_id, From b28060db7172e6d8912d88b369123eb89e0d36b4 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sun, 12 Nov 2023 11:12:49 +0200 Subject: [PATCH 132/299] ovl: fix misformatted comment Remove misleading /** prefix from a regular comment. Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202311121628.byHp8tkv-lkp@intel.com/ Signed-off-by: Amir Goldstein --- fs/overlayfs/util.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index 50a201e9cd39..c3f020ca13a8 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -978,7 +978,7 @@ int ovl_set_protattr(struct inode *inode, struct dentry *upper, return 0; } -/** +/* * Caller must hold a reference to inode to prevent it from being freed while * it is marked inuse. */ From 37f32f52643869131ec01bb69bdf9f404f6109fb Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sun, 12 Nov 2023 10:11:25 +0200 Subject: [PATCH 133/299] ovl: fix memory leak in ovl_parse_param() On failure to parse parameters in ovl_parse_param_lowerdir(), it is necessary to update ctx->nr with the correct nr before using ovl_reset_lowerdirs() to release l->name. 
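The underlying rule is simple: bump the live-entry count the moment an entry becomes owned by the context, so the shared error-path helper frees exactly what was set up. A small userspace sketch follows; the names are hypothetical, not the overlayfs parser:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ctx {
        char  *name[8];
        size_t nr;             /* entries that are live and must be freed */
    };

    static void reset_layers(struct ctx *c)
    {
        for (size_t i = 0; i < c->nr; i++)
            free(c->name[i]);
        c->nr = 0;
    }

    static int parse_layers(struct ctx *c, const char *const *in, size_t n)
    {
        for (size_t i = 0; i < n && c->nr < 8; i++) {
            char *dup = strdup(in[i]);
            if (!dup) {
                reset_layers(c);   /* frees exactly the live prefix */
                return -1;
            }
            c->name[c->nr] = dup;
            c->nr++;               /* count it as soon as it is owned */
        }
        return 0;
    }

    int main(void)
    {
        struct ctx c = { .nr = 0 };
        const char *layers[] = { "lower1", "lower2", "lower3" };

        if (parse_layers(&c, layers, 3) == 0)
            printf("parsed %zu layers\n", c.nr);
        reset_layers(&c);
        return 0;
    }
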
Reported-and-tested-by: syzbot+26eedf3631650972f17c@syzkaller.appspotmail.com Fixes: c835110b588a ("ovl: remove unused code in lowerdir param parsing") Co-authored-by: Edward Adam Davis Signed-off-by: Amir Goldstein --- fs/overlayfs/params.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c index ddab9ea267d1..3fe2dde1598f 100644 --- a/fs/overlayfs/params.c +++ b/fs/overlayfs/params.c @@ -430,7 +430,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) struct ovl_fs_context *ctx = fc->fs_private; struct ovl_fs_context_layer *l; char *dup = NULL, *iter; - ssize_t nr_lower = 0, nr = 0, nr_data = 0; + ssize_t nr_lower, nr; bool data_layer = false; /* @@ -482,6 +482,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) iter = dup; l = ctx->lower; for (nr = 0; nr < nr_lower; nr++, l++) { + ctx->nr++; memset(l, 0, sizeof(*l)); err = ovl_mount_dir(iter, &l->path); @@ -498,10 +499,10 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) goto out_put; if (data_layer) - nr_data++; + ctx->nr_data++; /* Calling strchr() again would overrun. */ - if ((nr + 1) == nr_lower) + if (ctx->nr == nr_lower) break; err = -EINVAL; @@ -511,7 +512,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) * This is a regular layer so we require that * there are no data layers. */ - if ((ctx->nr_data + nr_data) > 0) { + if (ctx->nr_data > 0) { pr_err("regular lower layers cannot follow data lower layers"); goto out_put; } @@ -524,8 +525,6 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) data_layer = true; iter++; } - ctx->nr = nr_lower; - ctx->nr_data += nr_data; kfree(dup); return 0; From b944aa9d86d5f782bfe5e51336434c960304839c Mon Sep 17 00:00:00 2001 From: Matus Malych Date: Tue, 14 Nov 2023 14:35:25 +0100 Subject: [PATCH 134/299] ALSA: hda/realtek: Enable Mute LED on HP 255 G10 HP 255 G10 has a mute LED that can be made to work using quirk ALC236_FIXUP_HP_MUTE_LED_COEFBIT2. 
Enable already existing quirk - at correct line to keep order Signed-off-by: Matus Malych Cc: Link: https://lore.kernel.org/r/20231114133524.11340-1-matus@malych.org Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index cdd808e02b44..3c85b8247c11 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -9868,6 +9868,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8b2f, "HP 255 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b43, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b44, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), From 29954d5b1e0d67a4cd61c30c2201030c97e94b1e Mon Sep 17 00:00:00 2001 From: Shyam Prasad N Date: Tue, 14 Nov 2023 04:54:12 +0000 Subject: [PATCH 135/299] cifs: fix leak of iface for primary channel My last change in this area introduced a change which accounted for primary channel in the interface ref count. However, it did not reduce this ref count on deallocation of the primary channel. i.e. during umount. Fixing this leak here, by dropping this ref count for primary channel while freeing up the session. Fixes: fa1d0508bdd4 ("cifs: account for primary channel in the interface list") Cc: stable@vger.kernel.org Reported-by: Paulo Alcantara Signed-off-by: Shyam Prasad N Signed-off-by: Steve French --- fs/smb/client/connect.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index 57c2a7df3457..f896f60c924b 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -2065,6 +2065,12 @@ void __cifs_put_smb_ses(struct cifs_ses *ses) ses->chans[i].server = NULL; } + /* we now account for primary channel in iface->refcount */ + if (ses->chans[0].iface) { + kref_put(&ses->chans[0].iface->refcount, release_iface); + ses->chans[0].server = NULL; + } + sesInfoFree(ses); cifs_put_tcp_session(server, 0); } From 5eef12c4e3230f2025dc46ad8c4a3bc19978e5d7 Mon Sep 17 00:00:00 2001 From: Shyam Prasad N Date: Tue, 14 Nov 2023 04:58:23 +0000 Subject: [PATCH 136/299] cifs: fix lock ordering while disabling multichannel The code to handle the case of server disabling multichannel was picking iface_lock with chan_lock held. This goes against the lock ordering rules, as iface_lock is a higher order lock (even if it isn't so obvious). This change fixes the lock ordering by doing the following in that order for each secondary channel: 1. store iface and server pointers in local variable 2. remove references to iface and server in channels 3. unlock chan_lock 4. lock iface_lock 5. dec ref count for iface 6. unlock iface_lock 7. dec ref count for server 8. lock chan_lock again Since this function can only be called in smb2_reconnect, and that cannot be called by two parallel processes, we should not have races due to dropping chan_lock between steps 3 and 8. 
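A compressed userspace sketch of that ordering, with hypothetical types and the server reference from step 7 left out for brevity:

    #include <pthread.h>
    #include <stdio.h>

    /* iface_lock is the higher-order lock: it must never be taken
     * while chan_lock is held. */
    static pthread_mutex_t iface_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t chan_lock  = PTHREAD_MUTEX_INITIALIZER;

    struct iface { int refcount; };
    struct chan  { struct iface *iface; };

    /* Called with chan_lock held; returns with chan_lock held again. */
    static void disable_channel(struct chan *ch)
    {
        /* steps 1-2: stash the pointer, clear the channel's reference */
        struct iface *iface = ch->iface;
        ch->iface = NULL;

        /* step 3: drop the lower-order lock before taking the higher one */
        pthread_mutex_unlock(&chan_lock);

        if (iface) {
            /* steps 4-6: take iface_lock, drop the ref, release it */
            pthread_mutex_lock(&iface_lock);
            iface->refcount--;
            pthread_mutex_unlock(&iface_lock);
        }

        /* step 8: re-take chan_lock before returning to the caller's loop */
        pthread_mutex_lock(&chan_lock);
    }

    int main(void)
    {
        struct iface ifc = { .refcount = 1 };
        struct chan ch = { .iface = &ifc };

        pthread_mutex_lock(&chan_lock);
        disable_channel(&ch);
        pthread_mutex_unlock(&chan_lock);

        printf("iface refcount is now %d\n", ifc.refcount);
        return 0;
    }
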
Fixes: ee1d21794e55 ("cifs: handle when server stops supporting multichannel") Reported-by: Paulo Alcantara Signed-off-by: Shyam Prasad N Signed-off-by: Steve French --- fs/smb/client/sess.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c index 0bb2ac929061..8b2d7c1ca428 100644 --- a/fs/smb/client/sess.c +++ b/fs/smb/client/sess.c @@ -322,28 +322,32 @@ cifs_disable_secondary_channels(struct cifs_ses *ses) iface = ses->chans[i].iface; server = ses->chans[i].server; + /* + * remove these references first, since we need to unlock + * the chan_lock here, since iface_lock is a higher lock + */ + ses->chans[i].iface = NULL; + ses->chans[i].server = NULL; + spin_unlock(&ses->chan_lock); + if (iface) { spin_lock(&ses->iface_lock); kref_put(&iface->refcount, release_iface); - ses->chans[i].iface = NULL; iface->num_channels--; if (iface->weight_fulfilled) iface->weight_fulfilled--; spin_unlock(&ses->iface_lock); } - spin_unlock(&ses->chan_lock); - if (server && !server->terminate) { - server->terminate = true; - cifs_signal_cifsd_for_reconnect(server, false); - } - spin_lock(&ses->chan_lock); - if (server) { - ses->chans[i].server = NULL; + if (!server->terminate) { + server->terminate = true; + cifs_signal_cifsd_for_reconnect(server, false); + } cifs_put_tcp_session(server, false); } + spin_lock(&ses->chan_lock); } done: From eab03c23c2a162085b13200d7942fc5a00b5ccc8 Mon Sep 17 00:00:00 2001 From: Abel Wu Date: Tue, 7 Nov 2023 17:05:07 +0800 Subject: [PATCH 137/299] sched/eevdf: Fix vruntime adjustment on reweight vruntime of the (on_rq && !0-lag) entity needs to be adjusted when it gets re-weighted, and the calculations can be simplified based on the fact that re-weight won't change the w-average of all the entities. Please check the proofs in comments. But adjusting vruntime can also cause position change in RB-tree hence require re-queue to fix up which might be costly. This might be avoided by deferring adjustment to the time the entity actually leaves tree (dequeue/pick), but that will negatively affect task selection and probably not good enough either. Fixes: 147f3efaa241 ("sched/fair: Implement an EEVDF-like scheduling policy") Signed-off-by: Abel Wu Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20231107090510.71322-2-wuyun.abel@bytedance.com --- kernel/sched/fair.c | 151 +++++++++++++++++++++++++++++++++++++------- 1 file changed, 128 insertions(+), 23 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 2048138ce54b..025d90925bf6 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3666,41 +3666,140 @@ static inline void dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } #endif +static void reweight_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *se, + unsigned long weight) +{ + unsigned long old_weight = se->load.weight; + u64 avruntime = avg_vruntime(cfs_rq); + s64 vlag, vslice; + + /* + * VRUNTIME + * ======== + * + * COROLLARY #1: The virtual runtime of the entity needs to be + * adjusted if re-weight at !0-lag point. + * + * Proof: For contradiction assume this is not true, so we can + * re-weight without changing vruntime at !0-lag point. 
+ * + * Weight VRuntime Avg-VRuntime + * before w v V + * after w' v' V' + * + * Since lag needs to be preserved through re-weight: + * + * lag = (V - v)*w = (V'- v')*w', where v = v' + * ==> V' = (V - v)*w/w' + v (1) + * + * Let W be the total weight of the entities before reweight, + * since V' is the new weighted average of entities: + * + * V' = (WV + w'v - wv) / (W + w' - w) (2) + * + * by using (1) & (2) we obtain: + * + * (WV + w'v - wv) / (W + w' - w) = (V - v)*w/w' + v + * ==> (WV-Wv+Wv+w'v-wv)/(W+w'-w) = (V - v)*w/w' + v + * ==> (WV - Wv)/(W + w' - w) + v = (V - v)*w/w' + v + * ==> (V - v)*W/(W + w' - w) = (V - v)*w/w' (3) + * + * Since we are doing at !0-lag point which means V != v, we + * can simplify (3): + * + * ==> W / (W + w' - w) = w / w' + * ==> Ww' = Ww + ww' - ww + * ==> W * (w' - w) = w * (w' - w) + * ==> W = w (re-weight indicates w' != w) + * + * So the cfs_rq contains only one entity, hence vruntime of + * the entity @v should always equal to the cfs_rq's weighted + * average vruntime @V, which means we will always re-weight + * at 0-lag point, thus breach assumption. Proof completed. + * + * + * COROLLARY #2: Re-weight does NOT affect weighted average + * vruntime of all the entities. + * + * Proof: According to corollary #1, Eq. (1) should be: + * + * (V - v)*w = (V' - v')*w' + * ==> v' = V' - (V - v)*w/w' (4) + * + * According to the weighted average formula, we have: + * + * V' = (WV - wv + w'v') / (W - w + w') + * = (WV - wv + w'(V' - (V - v)w/w')) / (W - w + w') + * = (WV - wv + w'V' - Vw + wv) / (W - w + w') + * = (WV + w'V' - Vw) / (W - w + w') + * + * ==> V'*(W - w + w') = WV + w'V' - Vw + * ==> V' * (W - w) = (W - w) * V (5) + * + * If the entity is the only one in the cfs_rq, then reweight + * always occurs at 0-lag point, so V won't change. Or else + * there are other entities, hence W != w, then Eq. (5) turns + * into V' = V. So V won't change in either case, proof done. + * + * + * So according to corollary #1 & #2, the effect of re-weight + * on vruntime should be: + * + * v' = V' - (V - v) * w / w' (4) + * = V - (V - v) * w / w' + * = V - vl * w / w' + * = V - vl' + */ + if (avruntime != se->vruntime) { + vlag = (s64)(avruntime - se->vruntime); + vlag = div_s64(vlag * old_weight, weight); + se->vruntime = avruntime - vlag; + } + + /* + * DEADLINE + * ======== + * + * When the weight changes, the virtual time slope changes and + * we should adjust the relative virtual deadline accordingly. + * + * d' = v' + (d - v)*w/w' + * = V' - (V - v)*w/w' + (d - v)*w/w' + * = V - (V - v)*w/w' + (d - v)*w/w' + * = V + (d - V)*w/w' + */ + vslice = (s64)(se->deadline - avruntime); + vslice = div_s64(vslice * old_weight, weight); + se->deadline = avruntime + vslice; +} + static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight) { - unsigned long old_weight = se->load.weight; + bool curr = cfs_rq->curr == se; if (se->on_rq) { /* commit outstanding execution time */ - if (cfs_rq->curr == se) + if (curr) update_curr(cfs_rq); else - avg_vruntime_sub(cfs_rq, se); + __dequeue_entity(cfs_rq, se); update_load_sub(&cfs_rq->load, se->load.weight); } dequeue_load_avg(cfs_rq, se); - update_load_set(&se->load, weight); - if (!se->on_rq) { /* * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i), * we need to scale se->vlag when w_i changes. 
*/ - se->vlag = div_s64(se->vlag * old_weight, weight); + se->vlag = div_s64(se->vlag * se->load.weight, weight); } else { - s64 deadline = se->deadline - se->vruntime; - /* - * When the weight changes, the virtual time slope changes and - * we should adjust the relative virtual deadline accordingly. - */ - deadline = div_s64(deadline * old_weight, weight); - se->deadline = se->vruntime + deadline; - if (se != cfs_rq->curr) - min_deadline_cb_propagate(&se->run_node, NULL); + reweight_eevdf(cfs_rq, se, weight); } + update_load_set(&se->load, weight); + #ifdef CONFIG_SMP do { u32 divider = get_pelt_divider(&se->avg); @@ -3712,8 +3811,17 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, enqueue_load_avg(cfs_rq, se); if (se->on_rq) { update_load_add(&cfs_rq->load, se->load.weight); - if (cfs_rq->curr != se) - avg_vruntime_add(cfs_rq, se); + if (!curr) { + /* + * The entity's vruntime has been adjusted, so let's check + * whether the rq-wide min_vruntime needs updated too. Since + * the calculations above require stable min_vruntime rather + * than up-to-date one, we do the update at the end of the + * reweight process. + */ + __enqueue_entity(cfs_rq, se); + update_min_vruntime(cfs_rq); + } } } @@ -3857,14 +3965,11 @@ static void update_cfs_group(struct sched_entity *se) #ifndef CONFIG_SMP shares = READ_ONCE(gcfs_rq->tg->shares); - - if (likely(se->load.weight == shares)) - return; #else - shares = calc_group_shares(gcfs_rq); + shares = calc_group_shares(gcfs_rq); #endif - - reweight_entity(cfs_rq_of(se), se, shares); + if (unlikely(se->load.weight != shares)) + reweight_entity(cfs_rq_of(se), se, shares); } #else /* CONFIG_FAIR_GROUP_SCHED */ From 8b39d20eceeda6c4eb23df1497f9ed2fffdc8f69 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 26 Oct 2023 12:41:14 -0400 Subject: [PATCH 138/299] sched: psi: fix unprivileged polling against cgroups 519fabc7aaba ("psi: remove 500ms min window size limitation for triggers") breaks unprivileged psi polling on cgroups. Historically, we had a privilege check for polling in the open() of a pressure file in /proc, but were erroneously missing it for the open() of cgroup pressure files. When unprivileged polling was introduced in d82caa273565 ("sched/psi: Allow unprivileged polling of N*2s period"), it needed to filter privileges depending on the exact polling parameters, and as such moved the CAP_SYS_RESOURCE check from the proc open() callback to psi_trigger_create(). Both the proc files as well as cgroup files go through this during write(). This implicitly added the missing check for privileges required for HT polling for cgroups. When 519fabc7aaba ("psi: remove 500ms min window size limitation for triggers") followed right after to remove further restrictions on the RT polling window, it incorrectly assumed the cgroup privilege check was still missing and added it to the cgroup open(), mirroring what we used to do for proc files in the past. As a result, unprivileged poll requests that would be supported now get rejected when opening the cgroup pressure file for writing. Remove the cgroup open() check. psi_trigger_create() handles it. 
Fixes: 519fabc7aaba ("psi: remove 500ms min window size limitation for triggers") Reported-by: Luca Boccassi Signed-off-by: Johannes Weiner Signed-off-by: Peter Zijlstra (Intel) Acked-by: Luca Boccassi Acked-by: Suren Baghdasaryan Cc: stable@vger.kernel.org # 6.5+ Link: https://lore.kernel.org/r/20231026164114.2488682-1-hannes@cmpxchg.org --- kernel/cgroup/cgroup.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 1d5b9de3b1b9..4b9ff41ca603 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -3885,14 +3885,6 @@ static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of, return psi_trigger_poll(&ctx->psi.trigger, of->file, pt); } -static int cgroup_pressure_open(struct kernfs_open_file *of) -{ - if (of->file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE)) - return -EPERM; - - return 0; -} - static void cgroup_pressure_release(struct kernfs_open_file *of) { struct cgroup_file_ctx *ctx = of->priv; @@ -5299,7 +5291,6 @@ static struct cftype cgroup_psi_files[] = { { .name = "io.pressure", .file_offset = offsetof(struct cgroup, psi_files[PSI_IO]), - .open = cgroup_pressure_open, .seq_show = cgroup_io_pressure_show, .write = cgroup_io_pressure_write, .poll = cgroup_pressure_poll, @@ -5308,7 +5299,6 @@ static struct cftype cgroup_psi_files[] = { { .name = "memory.pressure", .file_offset = offsetof(struct cgroup, psi_files[PSI_MEM]), - .open = cgroup_pressure_open, .seq_show = cgroup_memory_pressure_show, .write = cgroup_memory_pressure_write, .poll = cgroup_pressure_poll, @@ -5317,7 +5307,6 @@ static struct cftype cgroup_psi_files[] = { { .name = "cpu.pressure", .file_offset = offsetof(struct cgroup, psi_files[PSI_CPU]), - .open = cgroup_pressure_open, .seq_show = cgroup_cpu_pressure_show, .write = cgroup_cpu_pressure_write, .poll = cgroup_pressure_poll, @@ -5327,7 +5316,6 @@ static struct cftype cgroup_psi_files[] = { { .name = "irq.pressure", .file_offset = offsetof(struct cgroup, psi_files[PSI_IRQ]), - .open = cgroup_pressure_open, .seq_show = cgroup_irq_pressure_show, .write = cgroup_irq_pressure_write, .poll = cgroup_pressure_poll, From 6d7e4782bcf549221b4ccfffec2cf4d1a473f1a3 Mon Sep 17 00:00:00 2001 From: Keisuke Nishimura Date: Tue, 31 Oct 2023 14:38:22 +0100 Subject: [PATCH 139/299] sched/fair: Fix the decision for load balance should_we_balance is called for the decision to do load-balancing. When sched ticks invoke this function, only one CPU should return true. However, in the current code, two CPUs can return true. The following situation, where b means busy and i means idle, is an example, because CPU 0 and CPU 2 return true. [0, 1] [2, 3] b b i b This fix checks if there exists an idle CPU with busy sibling(s) after looking for a CPU on an idle core. If some idle CPUs with busy siblings are found, just the first one should do load-balancing. 
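A small userspace model of the fixed decision, using the "b b i b" topology above (CPUs 0/1 share one core, CPUs 2/3 the other; all names are hypothetical):

    #include <stdio.h>
    #include <stdbool.h>

    /* true = busy, false = idle; mirrors the example in the text. */
    static const bool busy[4] = { true, true, false, true };

    static int sibling_of(int cpu) { return cpu ^ 1; }

    /* Return the one CPU that should run the load balance. */
    static int pick_balance_cpu(void)
    {
        int idle_smt = -1;

        for (int cpu = 0; cpu < 4; cpu++) {
            if (busy[cpu])
                continue;
            if (busy[sibling_of(cpu)]) {
                /* idle CPU on a busy core: remember the first one */
                if (idle_smt == -1)
                    idle_smt = cpu;
                continue;
            }
            return cpu;             /* first fully idle core wins */
        }

        if (idle_smt != -1)
            return idle_smt;        /* first idle CPU with busy siblings */

        return 0;                   /* fall back to the first CPU */
    }

    int main(void)
    {
        printf("CPU %d should balance\n", pick_balance_cpu());  /* CPU 2 */
        return 0;
    }

With the fix, only CPU 2 is selected; before it, both CPU 0 and CPU 2 could return true.
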
Fixes: b1bfeab9b002 ("sched/fair: Consider the idle state of the whole core for load balance") Signed-off-by: Keisuke Nishimura Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Chen Yu Reviewed-by: Shrikanth Hegde Reviewed-by: Vincent Guittot Link: https://lkml.kernel.org/r/20231031133821.1570861-1-keisuke.nishimura@inria.fr --- kernel/sched/fair.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 025d90925bf6..d7a3c63a2171 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -11184,12 +11184,16 @@ static int should_we_balance(struct lb_env *env) continue; } - /* Are we the first idle CPU? */ + /* + * Are we the first idle core in a non-SMT domain or higher, + * or the first idle CPU in a SMT domain? + */ return cpu == env->dst_cpu; } - if (idle_smt == env->dst_cpu) - return true; + /* Are we the first idle CPU with busy siblings? */ + if (idle_smt != -1) + return idle_smt == env->dst_cpu; /* Are we the first CPU of this group ? */ return group_balance_cpu(sg) == env->dst_cpu; From 09f12bf9f790052710bd6e48a1fc1bc4d9e17389 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 7 Nov 2023 18:18:31 +0300 Subject: [PATCH 140/299] nouveau/gsp/r535: uninitialized variable in r535_gsp_acpi_mux_id() The if we hit the "continue" statement on the first iteration through the loop then "handle_mux" needs to be set to NULL so we continue looping. Fixes: 176fdcbddfd2 ("drm/nouveau/gsp/r535: add support for booting GSP-RM") Signed-off-by: Dan Carpenter Reviewed-by: Danilo Krummrich Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/1d864f6e-43e9-43d8-9d90-30e76c9c843b@moroto.mountain --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index e31f9641114b..afa8e7377a76 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -1159,7 +1159,7 @@ static void r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode, MUX_METHOD_DATA_ELEMENT *part) { - acpi_handle iter = NULL, handle_mux; + acpi_handle iter = NULL, handle_mux = NULL; acpi_status status; unsigned long long value; From 42bd415bd8bd43721d423930b4695c565661e687 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 8 Nov 2023 10:40:21 +0300 Subject: [PATCH 141/299] nouveau/gsp/r535: Fix a NULL vs error pointer bug The r535_gsp_cmdq_get() function returns error pointers but this code checks for NULL. Also we need to propagate the error pointer back to the callers in r535_gsp_rpc_get(). Returning NULL will lead to a NULL pointer dereference. 
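The convention being violated is easy to demonstrate outside the kernel. Below is a simplified userspace rendition of the error-pointer helpers from <linux/err.h> (the real definitions differ slightly), showing why a NULL check never fires for a value produced by ERR_PTR():

    #include <stdio.h>

    /* Simplified stand-ins: an "error pointer" encodes a negative errno
     * in the top 4095 values of the address space. */
    #define MAX_ERRNO   4095
    #define IS_ERR(p)   ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
    #define ERR_PTR(e)  ((void *)(long)(e))
    #define PTR_ERR(p)  ((long)(p))

    static void *get_buffer(int fail)
    {
        static char buf[32];

        if (fail)
            return ERR_PTR(-12);    /* -ENOMEM */
        return buf;
    }

    int main(void)
    {
        void *p = get_buffer(1);

        if (!p)                     /* the buggy check: never true here */
            printf("NULL check caught it\n");

        if (IS_ERR(p))              /* the correct check */
            printf("error pointer, errno %ld\n", PTR_ERR(p));

        return 0;
    }

When forwarding such a pointer from one function's return to another's, ERR_CAST() preserves the encoded errno while changing the pointer type, which is what the fix uses in r535_gsp_rpc_get().
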
Fixes: 176fdcbddfd2 ("drm/nouveau/gsp/r535: add support for booting GSP-RM") Signed-off-by: Dan Carpenter Reviewed-by: Danilo Krummrich Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/f71996d9-d1cb-45ea-a4b2-2dfc21312d8c@kili.mountain --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index afa8e7377a76..dc44f5c7833f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -689,8 +689,8 @@ r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc) struct nvfw_gsp_rpc *rpc; rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64))); - if (!rpc) - return NULL; + if (IS_ERR(rpc)) + return ERR_CAST(rpc); rpc->header_version = 0x03000000; rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V'; From a2e36cd56041e277d7d81d35638fd8d9731e21f5 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 7 Nov 2023 15:32:55 +1000 Subject: [PATCH 142/299] nouveau: use an rwlock for the event lock. This allows it to break the following circular locking dependency. Aug 10 07:01:29 dg1test kernel: ====================================================== Aug 10 07:01:29 dg1test kernel: WARNING: possible circular locking dependency detected Aug 10 07:01:29 dg1test kernel: 6.4.0-rc7+ #10 Not tainted Aug 10 07:01:29 dg1test kernel: ------------------------------------------------------ Aug 10 07:01:29 dg1test kernel: wireplumber/2236 is trying to acquire lock: Aug 10 07:01:29 dg1test kernel: ffff8fca5320da18 (&fctx->lock){-...}-{2:2}, at: nouveau_fence_wait_uevent_handler+0x2b/0x100 [nouveau] Aug 10 07:01:29 dg1test kernel: but task is already holding lock: Aug 10 07:01:29 dg1test kernel: ffff8fca41208610 (&event->list_lock#2){-...}-{2:2}, at: nvkm_event_ntfy+0x50/0xf0 [nouveau] Aug 10 07:01:29 dg1test kernel: which lock already depends on the new lock. 
Aug 10 07:01:29 dg1test kernel: the existing dependency chain (in reverse order) is: Aug 10 07:01:29 dg1test kernel: -> #3 (&event->list_lock#2){-...}-{2:2}: Aug 10 07:01:29 dg1test kernel: _raw_spin_lock_irqsave+0x4b/0x70 Aug 10 07:01:29 dg1test kernel: nvkm_event_ntfy+0x50/0xf0 [nouveau] Aug 10 07:01:29 dg1test kernel: ga100_fifo_nonstall_intr+0x24/0x30 [nouveau] Aug 10 07:01:29 dg1test kernel: nvkm_intr+0x12c/0x240 [nouveau] Aug 10 07:01:29 dg1test kernel: __handle_irq_event_percpu+0x88/0x240 Aug 10 07:01:29 dg1test kernel: handle_irq_event+0x38/0x80 Aug 10 07:01:29 dg1test kernel: handle_edge_irq+0xa3/0x240 Aug 10 07:01:29 dg1test kernel: __common_interrupt+0x72/0x160 Aug 10 07:01:29 dg1test kernel: common_interrupt+0x60/0xe0 Aug 10 07:01:29 dg1test kernel: asm_common_interrupt+0x26/0x40 Aug 10 07:01:29 dg1test kernel: -> #2 (&device->intr.lock){-...}-{2:2}: Aug 10 07:01:29 dg1test kernel: _raw_spin_lock_irqsave+0x4b/0x70 Aug 10 07:01:29 dg1test kernel: nvkm_inth_allow+0x2c/0x80 [nouveau] Aug 10 07:01:29 dg1test kernel: nvkm_event_ntfy_state+0x181/0x250 [nouveau] Aug 10 07:01:29 dg1test kernel: nvkm_event_ntfy_allow+0x63/0xd0 [nouveau] Aug 10 07:01:29 dg1test kernel: nvkm_uevent_mthd+0x4d/0x70 [nouveau] Aug 10 07:01:29 dg1test kernel: nvkm_ioctl+0x10b/0x250 [nouveau] Aug 10 07:01:29 dg1test kernel: nvif_object_mthd+0xa8/0x1f0 [nouveau] Aug 10 07:01:29 dg1test kernel: nvif_event_allow+0x2a/0xa0 [nouveau] Aug 10 07:01:29 dg1test kernel: nouveau_fence_enable_signaling+0x78/0x80 [nouveau] Aug 10 07:01:29 dg1test kernel: __dma_fence_enable_signaling+0x5e/0x100 Aug 10 07:01:29 dg1test kernel: dma_fence_add_callback+0x4b/0xd0 Aug 10 07:01:29 dg1test kernel: nouveau_cli_work_queue+0xae/0x110 [nouveau] Aug 10 07:01:29 dg1test kernel: nouveau_gem_object_close+0x1d1/0x2a0 [nouveau] Aug 10 07:01:29 dg1test kernel: drm_gem_handle_delete+0x70/0xe0 [drm] Aug 10 07:01:29 dg1test kernel: drm_ioctl_kernel+0xa5/0x150 [drm] Aug 10 07:01:29 dg1test kernel: drm_ioctl+0x256/0x490 [drm] Aug 10 07:01:29 dg1test kernel: nouveau_drm_ioctl+0x5a/0xb0 [nouveau] Aug 10 07:01:29 dg1test kernel: __x64_sys_ioctl+0x91/0xd0 Aug 10 07:01:29 dg1test kernel: do_syscall_64+0x3c/0x90 Aug 10 07:01:29 dg1test kernel: entry_SYSCALL_64_after_hwframe+0x72/0xdc Aug 10 07:01:29 dg1test kernel: -> #1 (&event->refs_lock#4){....}-{2:2}: Aug 10 07:01:29 dg1test kernel: _raw_spin_lock_irqsave+0x4b/0x70 Aug 10 07:01:29 dg1test kernel: nvkm_event_ntfy_state+0x37/0x250 [nouveau] Aug 10 07:01:29 dg1test kernel: nvkm_event_ntfy_allow+0x63/0xd0 [nouveau] Aug 10 07:01:29 dg1test kernel: nvkm_uevent_mthd+0x4d/0x70 [nouveau] Aug 10 07:01:29 dg1test kernel: nvkm_ioctl+0x10b/0x250 [nouveau] Aug 10 07:01:29 dg1test kernel: nvif_object_mthd+0xa8/0x1f0 [nouveau] Aug 10 07:01:29 dg1test kernel: nvif_event_allow+0x2a/0xa0 [nouveau] Aug 10 07:01:29 dg1test kernel: nouveau_fence_enable_signaling+0x78/0x80 [nouveau] Aug 10 07:01:29 dg1test kernel: __dma_fence_enable_signaling+0x5e/0x100 Aug 10 07:01:29 dg1test kernel: dma_fence_add_callback+0x4b/0xd0 Aug 10 07:01:29 dg1test kernel: nouveau_cli_work_queue+0xae/0x110 [nouveau] Aug 10 07:01:29 dg1test kernel: nouveau_gem_object_close+0x1d1/0x2a0 [nouveau] Aug 10 07:01:29 dg1test kernel: drm_gem_handle_delete+0x70/0xe0 [drm] Aug 10 07:01:29 dg1test kernel: drm_ioctl_kernel+0xa5/0x150 [drm] Aug 10 07:01:29 dg1test kernel: drm_ioctl+0x256/0x490 [drm] Aug 10 07:01:29 dg1test kernel: nouveau_drm_ioctl+0x5a/0xb0 [nouveau] Aug 10 07:01:29 dg1test kernel: __x64_sys_ioctl+0x91/0xd0 Aug 10 07:01:29 dg1test kernel: 
do_syscall_64+0x3c/0x90 Aug 10 07:01:29 dg1test kernel: entry_SYSCALL_64_after_hwframe+0x72/0xdc Aug 10 07:01:29 dg1test kernel: -> #0 (&fctx->lock){-...}-{2:2}: Aug 10 07:01:29 dg1test kernel: __lock_acquire+0x14e3/0x2240 Aug 10 07:01:29 dg1test kernel: lock_acquire+0xc8/0x2a0 Aug 10 07:01:29 dg1test kernel: _raw_spin_lock_irqsave+0x4b/0x70 Aug 10 07:01:29 dg1test kernel: nouveau_fence_wait_uevent_handler+0x2b/0x100 [nouveau] Aug 10 07:01:29 dg1test kernel: nvkm_client_event+0xf/0x20 [nouveau] Aug 10 07:01:29 dg1test kernel: nvkm_event_ntfy+0x9b/0xf0 [nouveau] Aug 10 07:01:29 dg1test kernel: ga100_fifo_nonstall_intr+0x24/0x30 [nouveau] Aug 10 07:01:29 dg1test kernel: nvkm_intr+0x12c/0x240 [nouveau] Aug 10 07:01:29 dg1test kernel: __handle_irq_event_percpu+0x88/0x240 Aug 10 07:01:29 dg1test kernel: handle_irq_event+0x38/0x80 Aug 10 07:01:29 dg1test kernel: handle_edge_irq+0xa3/0x240 Aug 10 07:01:29 dg1test kernel: __common_interrupt+0x72/0x160 Aug 10 07:01:29 dg1test kernel: common_interrupt+0x60/0xe0 Aug 10 07:01:29 dg1test kernel: asm_common_interrupt+0x26/0x40 Aug 10 07:01:29 dg1test kernel: other info that might help us debug this: Aug 10 07:01:29 dg1test kernel: Chain exists of: &fctx->lock --> &device->intr.lock --> &event->list_lock#2 Aug 10 07:01:29 dg1test kernel: Possible unsafe locking scenario: Aug 10 07:01:29 dg1test kernel: CPU0 CPU1 Aug 10 07:01:29 dg1test kernel: ---- ---- Aug 10 07:01:29 dg1test kernel: lock(&event->list_lock#2); Aug 10 07:01:29 dg1test kernel: lock(&device->intr.lock); Aug 10 07:01:29 dg1test kernel: lock(&event->list_lock#2); Aug 10 07:01:29 dg1test kernel: lock(&fctx->lock); Aug 10 07:01:29 dg1test kernel: *** DEADLOCK *** Aug 10 07:01:29 dg1test kernel: 2 locks held by wireplumber/2236: Aug 10 07:01:29 dg1test kernel: #0: ffff8fca53177bf8 (&device->intr.lock){-...}-{2:2}, at: nvkm_intr+0x29/0x240 [nouveau] Aug 10 07:01:29 dg1test kernel: #1: ffff8fca41208610 (&event->list_lock#2){-...}-{2:2}, at: nvkm_event_ntfy+0x50/0xf0 [nouveau] Aug 10 07:01:29 dg1test kernel: stack backtrace: Aug 10 07:01:29 dg1test kernel: CPU: 6 PID: 2236 Comm: wireplumber Not tainted 6.4.0-rc7+ #10 Aug 10 07:01:29 dg1test kernel: Hardware name: Gigabyte Technology Co., Ltd. Z390 I AORUS PRO WIFI/Z390 I AORUS PRO WIFI-CF, BIOS F8 11/05/2021 Aug 10 07:01:29 dg1test kernel: Call Trace: Aug 10 07:01:29 dg1test kernel: Aug 10 07:01:29 dg1test kernel: dump_stack_lvl+0x5b/0x90 Aug 10 07:01:29 dg1test kernel: check_noncircular+0xe2/0x110 Aug 10 07:01:29 dg1test kernel: __lock_acquire+0x14e3/0x2240 Aug 10 07:01:29 dg1test kernel: lock_acquire+0xc8/0x2a0 Aug 10 07:01:29 dg1test kernel: ? nouveau_fence_wait_uevent_handler+0x2b/0x100 [nouveau] Aug 10 07:01:29 dg1test kernel: ? lock_acquire+0xc8/0x2a0 Aug 10 07:01:29 dg1test kernel: _raw_spin_lock_irqsave+0x4b/0x70 Aug 10 07:01:29 dg1test kernel: ? 
nouveau_fence_wait_uevent_handler+0x2b/0x100 [nouveau] Aug 10 07:01:29 dg1test kernel: nouveau_fence_wait_uevent_handler+0x2b/0x100 [nouveau] Aug 10 07:01:29 dg1test kernel: nvkm_client_event+0xf/0x20 [nouveau] Aug 10 07:01:29 dg1test kernel: nvkm_event_ntfy+0x9b/0xf0 [nouveau] Aug 10 07:01:29 dg1test kernel: ga100_fifo_nonstall_intr+0x24/0x30 [nouveau] Aug 10 07:01:29 dg1test kernel: nvkm_intr+0x12c/0x240 [nouveau] Aug 10 07:01:29 dg1test kernel: __handle_irq_event_percpu+0x88/0x240 Aug 10 07:01:29 dg1test kernel: handle_irq_event+0x38/0x80 Aug 10 07:01:29 dg1test kernel: handle_edge_irq+0xa3/0x240 Aug 10 07:01:29 dg1test kernel: __common_interrupt+0x72/0x160 Aug 10 07:01:29 dg1test kernel: common_interrupt+0x60/0xe0 Aug 10 07:01:29 dg1test kernel: asm_common_interrupt+0x26/0x40 Aug 10 07:01:29 dg1test kernel: RIP: 0033:0x7fb66174d700 Aug 10 07:01:29 dg1test kernel: Code: c1 e2 05 29 ca 8d 0c 10 0f be 07 84 c0 75 eb 89 c8 c3 0f 1f 84 00 00 00 00 00 f3 0f 1e fa e9 d7 0f fc ff 0f 1f 80 00 00 00 00 0f 1e fa e9 c7 0f fc> Aug 10 07:01:29 dg1test kernel: RSP: 002b:00007ffdd3c48438 EFLAGS: 00000206 Aug 10 07:01:29 dg1test kernel: RAX: 000055bb758763c0 RBX: 000055bb758752c0 RCX: 00000000000028b0 Aug 10 07:01:29 dg1test kernel: RDX: 000055bb758752c0 RSI: 000055bb75887490 RDI: 000055bb75862950 Aug 10 07:01:29 dg1test kernel: RBP: 00007ffdd3c48490 R08: 000055bb75873b10 R09: 0000000000000001 Aug 10 07:01:29 dg1test kernel: R10: 0000000000000004 R11: 000055bb7587f000 R12: 000055bb75887490 Aug 10 07:01:29 dg1test kernel: R13: 000055bb757f6280 R14: 000055bb758875c0 R15: 000055bb757f6280 Aug 10 07:01:29 dg1test kernel: Signed-off-by: Dave Airlie Tested-by: Danilo Krummrich Reviewed-by: Danilo Krummrich Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231107053255.2257079-1-airlied@gmail.com --- drivers/gpu/drm/nouveau/include/nvkm/core/event.h | 4 ++-- drivers/gpu/drm/nouveau/nvkm/core/event.c | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/event.h b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h index 82b267c11147..460459af272d 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/event.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h @@ -14,7 +14,7 @@ struct nvkm_event { int index_nr; spinlock_t refs_lock; - spinlock_t list_lock; + rwlock_t list_lock; int *refs; struct list_head ntfy; @@ -38,7 +38,7 @@ nvkm_event_init(const struct nvkm_event_func *func, struct nvkm_subdev *subdev, int types_nr, int index_nr, struct nvkm_event *event) { spin_lock_init(&event->refs_lock); - spin_lock_init(&event->list_lock); + rwlock_init(&event->list_lock); return __nvkm_event_init(func, subdev, types_nr, index_nr, event); } diff --git a/drivers/gpu/drm/nouveau/nvkm/core/event.c b/drivers/gpu/drm/nouveau/nvkm/core/event.c index a6c877135598..61fed7792e41 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/event.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/event.c @@ -81,17 +81,17 @@ nvkm_event_ntfy_state(struct nvkm_event_ntfy *ntfy) static void nvkm_event_ntfy_remove(struct nvkm_event_ntfy *ntfy) { - spin_lock_irq(&ntfy->event->list_lock); + write_lock_irq(&ntfy->event->list_lock); list_del_init(&ntfy->head); - spin_unlock_irq(&ntfy->event->list_lock); + write_unlock_irq(&ntfy->event->list_lock); } static void nvkm_event_ntfy_insert(struct nvkm_event_ntfy *ntfy) { - spin_lock_irq(&ntfy->event->list_lock); + write_lock_irq(&ntfy->event->list_lock); list_add_tail(&ntfy->head, &ntfy->event->ntfy); - 
spin_unlock_irq(&ntfy->event->list_lock); + write_unlock_irq(&ntfy->event->list_lock); } static void @@ -176,7 +176,7 @@ nvkm_event_ntfy(struct nvkm_event *event, int id, u32 bits) return; nvkm_trace(event->subdev, "event: ntfy %08x on %d\n", bits, id); - spin_lock_irqsave(&event->list_lock, flags); + read_lock_irqsave(&event->list_lock, flags); list_for_each_entry_safe(ntfy, ntmp, &event->ntfy, head) { if (ntfy->id == id && ntfy->bits & bits) { @@ -185,7 +185,7 @@ nvkm_event_ntfy(struct nvkm_event *event, int id, u32 bits) } } - spin_unlock_irqrestore(&event->list_lock, flags); + read_unlock_irqrestore(&event->list_lock, flags); } void From 969d90ec212bae4b45bf9d21d7daa30aa6cf055e Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Tue, 14 Nov 2023 17:25:48 -0500 Subject: [PATCH 143/299] audit: don't WARN_ON_ONCE(!current->mm) in audit_exe_compare() eBPF can end up calling into the audit code from some odd places, and some of these places don't have @current set properly so we end up tripping the `WARN_ON_ONCE(!current->mm)` near the top of `audit_exe_compare()`. While the basic `!current->mm` check is good, the `WARN_ON_ONCE()` results in some scary console messages so let's drop that and just do the regular `!current->mm` check to avoid problems. Cc: Fixes: 47846d51348d ("audit: don't take task_lock() in audit_exe_compare() code path") Reported-by: Artem Savkov Signed-off-by: Paul Moore --- kernel/audit_watch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 91e82e34b51e..7a98cd176a12 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -531,7 +531,7 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark) if (tsk != current) return 0; - if (WARN_ON_ONCE(!current->mm)) + if (!current->mm) return 0; exe_file = get_mm_exe_file(current->mm); if (!exe_file) From c9bd1568d5462f4108417518ce1af7b924acfb6f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 14 Nov 2023 21:36:13 +0100 Subject: [PATCH 144/299] futex: Fix hardcoded flags Xi reported that commit 5694289ce183 ("futex: Flag conversion") broke glibc's robust futex tests. This was narrowed down to the change of FLAGS_SHARED from 0x01 to 0x10, at which point Florian noted that handle_futex_death() has a hardcoded flags argument of 1. Change this to: FLAGS_SIZE_32 | FLAGS_SHARED, matching how futex_to_flags() unconditionally sets FLAGS_SIZE_32 for all legacy futex ops. Reported-by: Xi Ruoyao Reported-by: Florian Weimer Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lkml.kernel.org/r/20231114201402.GA25315@noisy.programming.kicks-ass.net Fixes: 5694289ce183 ("futex: Flag conversion") Cc: --- kernel/futex/core.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/kernel/futex/core.c b/kernel/futex/core.c index 52695c59d041..dad981a865b8 100644 --- a/kernel/futex/core.c +++ b/kernel/futex/core.c @@ -700,7 +700,8 @@ retry: owner = uval & FUTEX_TID_MASK; if (pending_op && !pi && !owner) { - futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); + futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1, + FUTEX_BITSET_MATCH_ANY); return 0; } @@ -752,8 +753,10 @@ retry: * Wake robust non-PI futexes here. 
The wakeup of * PI futexes happens in exit_pi_state(): */ - if (!pi && (uval & FUTEX_WAITERS)) - futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); + if (!pi && (uval & FUTEX_WAITERS)) { + futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1, + FUTEX_BITSET_MATCH_ANY); + } return 0; } From 889c58b3155ff4c8e8671c95daef63d6fabbb6b1 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 9 Jun 2023 12:34:46 +0200 Subject: [PATCH 145/299] perf/core: Fix cpuctx refcounting Audit of the refcounting turned up that perf_pmu_migrate_context() fails to migrate the ctx refcount. Fixes: bd2756811766 ("perf: Rewrite core context handling") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lkml.kernel.org/r/20230612093539.085862001@infradead.org Cc: --- include/linux/perf_event.h | 13 ++++++++----- kernel/events/core.c | 17 +++++++++++++++++ 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index afb028c54f33..5547ba68e6e4 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -843,11 +843,11 @@ struct perf_event { }; /* - * ,-----------------------[1:n]----------------------. - * V V - * perf_event_context <-[1:n]-> perf_event_pmu_context <--- perf_event - * ^ ^ | | - * `--------[1:n]---------' `-[n:1]-> pmu <-[1:n]-' + * ,-----------------------[1:n]------------------------. + * V V + * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event + * | | + * `--[n:1]-> pmu <-[1:n]--' * * * struct perf_event_pmu_context lifetime is refcount based and RCU freed @@ -865,6 +865,9 @@ struct perf_event { * ctx->mutex pinning the configuration. Since we hold a reference on * group_leader (through the filedesc) it can't go away, therefore it's * associated pmu_ctx must exist and cannot change due to ctx->mutex. + * + * perf_event holds a refcount on perf_event_context + * perf_event holds a refcount on perf_event_pmu_context */ struct perf_event_pmu_context { struct pmu *pmu; diff --git a/kernel/events/core.c b/kernel/events/core.c index 683dc086ef10..b704d83a28b2 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4828,6 +4828,11 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx, void *task_ctx_data = NULL; if (!ctx->task) { + /* + * perf_pmu_migrate_context() / __perf_pmu_install_event() + * relies on the fact that find_get_pmu_context() cannot fail + * for CPU contexts. + */ struct perf_cpu_pmu_context *cpc; cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu); @@ -12889,6 +12894,9 @@ static void __perf_pmu_install_event(struct pmu *pmu, int cpu, struct perf_event *event) { struct perf_event_pmu_context *epc; + struct perf_event_context *old_ctx = event->ctx; + + get_ctx(ctx); /* normally find_get_context() */ event->cpu = cpu; epc = find_get_pmu_context(pmu, ctx, event); @@ -12897,6 +12905,11 @@ static void __perf_pmu_install_event(struct pmu *pmu, if (event->state >= PERF_EVENT_STATE_OFF) event->state = PERF_EVENT_STATE_INACTIVE; perf_install_in_context(ctx, event, cpu); + + /* + * Now that event->ctx is updated and visible, put the old ctx. + */ + put_ctx(old_ctx); } static void __perf_pmu_install(struct perf_event_context *ctx, @@ -12935,6 +12948,10 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) struct perf_event_context *src_ctx, *dst_ctx; LIST_HEAD(events); + /* + * Since per-cpu context is persistent, no need to grab an extra + * reference. 
+ */ src_ctx = &per_cpu_ptr(&perf_cpu_context, src_cpu)->ctx; dst_ctx = &per_cpu_ptr(&perf_cpu_context, dst_cpu)->ctx; From 006ccc3090e2f30f5f97857f3946312692a5279e Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 4 Nov 2023 22:54:26 -0400 Subject: [PATCH 146/299] bcachefs: Kill journal pre-reservations This deletes the complicated and somewhat expensive journal pre-reservation machinery in favor of just using journal watermarks: when the journal is more than half full, we run journal reclaim more aggressively, and when the journal is more than 3/4s full we only allow journal reclaim to get new journal reservations. Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_iter.c | 2 - fs/bcachefs/btree_key_cache.c | 14 ----- fs/bcachefs/btree_trans_commit.c | 36 +---------- fs/bcachefs/btree_types.h | 3 - fs/bcachefs/btree_update_interior.c | 30 --------- fs/bcachefs/btree_update_interior.h | 1 - fs/bcachefs/journal.c | 31 --------- fs/bcachefs/journal.h | 98 ----------------------------- fs/bcachefs/journal_reclaim.c | 42 +++++-------- fs/bcachefs/journal_types.h | 26 -------- fs/bcachefs/trace.h | 11 +--- 11 files changed, 19 insertions(+), 275 deletions(-) diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index c2adf3fbb0b3..6fa90bcd7016 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -3087,8 +3087,6 @@ void bch2_trans_put(struct btree_trans *trans) srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx); } - bch2_journal_preres_put(&c->journal, &trans->journal_preres); - kfree(trans->extra_journal_entries.data); if (trans->fs_usage_deltas) { diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index b3305a04d808..37fbf22de8fc 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -672,7 +672,6 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans, goto out; bch2_journal_pin_drop(j, &ck->journal); - bch2_journal_preres_put(j, &ck->res); BUG_ON(!btree_node_locked(c_iter.path, 0)); @@ -770,18 +769,6 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans, BUG_ON(insert->k.u64s > ck->u64s); - if (likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) { - int difference; - - BUG_ON(jset_u64s(insert->k.u64s) > trans->journal_preres.u64s); - - difference = jset_u64s(insert->k.u64s) - ck->res.u64s; - if (difference > 0) { - trans->journal_preres.u64s -= difference; - ck->res.u64s += difference; - } - } - bkey_copy(ck->k, insert); ck->valid = true; @@ -1006,7 +993,6 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc) cond_resched(); bch2_journal_pin_drop(&c->journal, &ck->journal); - bch2_journal_preres_put(&c->journal, &ck->res); list_del(&ck->list); kfree(ck->k); diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c index 55a120eb8692..12907beda98c 100644 --- a/fs/bcachefs/btree_trans_commit.c +++ b/fs/bcachefs/btree_trans_commit.c @@ -323,17 +323,6 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans, bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot)); } -static noinline int -bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned flags, - unsigned long trace_ip) -{ - return drop_locks_do(trans, - bch2_journal_preres_get(&trans->c->journal, - &trans->journal_preres, - trans->journal_preres_u64s, - (flags & BCH_WATERMARK_MASK))); -} - static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans, unsigned flags) { @@ -882,14 +871,6 @@ static inline int do_bch2_trans_commit(struct 
btree_trans *trans, unsigned flags } } - ret = bch2_journal_preres_get(&c->journal, - &trans->journal_preres, trans->journal_preres_u64s, - (flags & BCH_WATERMARK_MASK)|JOURNAL_RES_GET_NONBLOCK); - if (unlikely(ret == -BCH_ERR_journal_preres_get_blocked)) - ret = bch2_trans_journal_preres_get_cold(trans, flags, trace_ip); - if (unlikely(ret)) - return ret; - ret = bch2_trans_lock_write(trans); if (unlikely(ret)) return ret; @@ -1052,7 +1033,6 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags) struct bch_fs *c = trans->c; struct btree_insert_entry *i = NULL; struct btree_write_buffered_key *wb; - unsigned u64s; int ret = 0; if (!trans->nr_updates && @@ -1112,13 +1092,8 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags) EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags)); - memset(&trans->journal_preres, 0, sizeof(trans->journal_preres)); - trans->journal_u64s = trans->extra_journal_entries.nr; - trans->journal_preres_u64s = 0; - trans->journal_transaction_names = READ_ONCE(c->opts.journal_transaction_names); - if (trans->journal_transaction_names) trans->journal_u64s += jset_u64s(JSET_ENTRY_LOG_U64s); @@ -1134,16 +1109,11 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags) if (i->key_cache_already_flushed) continue; - /* we're going to journal the key being updated: */ - u64s = jset_u64s(i->k->k.u64s); - if (i->cached && - likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) - trans->journal_preres_u64s += u64s; - if (i->flags & BTREE_UPDATE_NOJOURNAL) continue; - trans->journal_u64s += u64s; + /* we're going to journal the key being updated: */ + trans->journal_u64s += jset_u64s(i->k->k.u64s); /* and we're also going to log the overwrite: */ if (trans->journal_transaction_names) @@ -1175,8 +1145,6 @@ retry: trace_and_count(c, transaction_commit, trans, _RET_IP_); out: - bch2_journal_preres_put(&c->journal, &trans->journal_preres); - if (likely(!(flags & BTREE_INSERT_NOCHECK_RW))) bch2_write_ref_put(c, BCH_WRITE_REF_trans); out_reset: diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index f3669fa68591..6fbd4ef3df6b 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -327,7 +327,6 @@ struct bkey_cached { struct rhash_head hash; struct list_head list; - struct journal_preres res; struct journal_entry_pin journal; u64 seq; @@ -441,11 +440,9 @@ struct btree_trans { struct journal_entry_pin *journal_pin; struct journal_res journal_res; - struct journal_preres journal_preres; u64 *journal_seq; struct disk_reservation *disk_res; unsigned journal_u64s; - unsigned journal_preres_u64s; struct replicas_delta_list *fs_usage_deltas; }; diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c index 39c2db68123b..76f27bc9fa24 100644 --- a/fs/bcachefs/btree_update_interior.c +++ b/fs/bcachefs/btree_update_interior.c @@ -513,8 +513,6 @@ static void bch2_btree_update_free(struct btree_update *as, struct btree_trans * up_read(&c->gc_lock); as->took_gc_lock = false; - bch2_journal_preres_put(&c->journal, &as->journal_preres); - bch2_journal_pin_drop(&c->journal, &as->journal); bch2_journal_pin_flush(&c->journal, &as->journal); bch2_disk_reservation_put(c, &as->disk_res); @@ -734,8 +732,6 @@ err: bch2_journal_pin_drop(&c->journal, &as->journal); - bch2_journal_preres_put(&c->journal, &as->journal_preres); - mutex_lock(&c->btree_interior_update_lock); for (i = 0; i < as->nr_new_nodes; i++) { b = as->new_nodes[i]; @@ -1047,7 +1043,6 @@ bch2_btree_update_start(struct btree_trans *trans, struct 
btree_path *path, unsigned nr_nodes[2] = { 0, 0 }; unsigned update_level = level; enum bch_watermark watermark = flags & BCH_WATERMARK_MASK; - unsigned journal_flags = 0; int ret = 0; u32 restart_count = trans->restart_count; @@ -1061,10 +1056,6 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, flags &= ~BCH_WATERMARK_MASK; flags |= watermark; - if (flags & BTREE_INSERT_JOURNAL_RECLAIM) - journal_flags |= JOURNAL_RES_GET_NONBLOCK; - journal_flags |= watermark; - while (1) { nr_nodes[!!update_level] += 1 + split; update_level++; @@ -1129,27 +1120,6 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, if (ret) goto err; - ret = bch2_journal_preres_get(&c->journal, &as->journal_preres, - BTREE_UPDATE_JOURNAL_RES, - journal_flags|JOURNAL_RES_GET_NONBLOCK); - if (ret) { - if (flags & BTREE_INSERT_JOURNAL_RECLAIM) { - ret = -BCH_ERR_journal_reclaim_would_deadlock; - goto err; - } - - ret = drop_locks_do(trans, - bch2_journal_preres_get(&c->journal, &as->journal_preres, - BTREE_UPDATE_JOURNAL_RES, - journal_flags)); - if (ret == -BCH_ERR_journal_preres_get_blocked) { - trace_and_count(c, trans_restart_journal_preres_get, trans, _RET_IP_, journal_flags); - ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_journal_preres_get); - } - if (ret) - goto err; - } - ret = bch2_disk_reservation_get(c, &as->disk_res, (nr_nodes[0] + nr_nodes[1]) * btree_sectors(c), c->opts.metadata_replicas, diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h index 4df21512d640..031076e75fa1 100644 --- a/fs/bcachefs/btree_update_interior.h +++ b/fs/bcachefs/btree_update_interior.h @@ -55,7 +55,6 @@ struct btree_update { unsigned update_level; struct disk_reservation disk_res; - struct journal_preres journal_preres; /* * BTREE_INTERIOR_UPDATING_NODE: diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c index 5b5d69f2316b..23a9b7845d11 100644 --- a/fs/bcachefs/journal.c +++ b/fs/bcachefs/journal.c @@ -526,36 +526,6 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res, return ret; } -/* journal_preres: */ - -static bool journal_preres_available(struct journal *j, - struct journal_preres *res, - unsigned new_u64s, - unsigned flags) -{ - bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags, true); - - if (!ret && mutex_trylock(&j->reclaim_lock)) { - bch2_journal_reclaim(j); - mutex_unlock(&j->reclaim_lock); - } - - return ret; -} - -int __bch2_journal_preres_get(struct journal *j, - struct journal_preres *res, - unsigned new_u64s, - unsigned flags) -{ - int ret; - - closure_wait_event(&j->preres_wait, - (ret = bch2_journal_error(j)) || - journal_preres_available(j, res, new_u64s, flags)); - return ret; -} - /* journal_entry_res: */ void bch2_journal_entry_res_resize(struct journal *j, @@ -1306,7 +1276,6 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) prt_printf(out, "last_seq:\t\t%llu\n", journal_last_seq(j)); prt_printf(out, "last_seq_ondisk:\t%llu\n", j->last_seq_ondisk); prt_printf(out, "flushed_seq_ondisk:\t%llu\n", j->flushed_seq_ondisk); - prt_printf(out, "prereserved:\t\t%u/%u\n", j->prereserved.reserved, j->prereserved.remaining); prt_printf(out, "watermark:\t\t%s\n", bch2_watermarks[j->watermark]); prt_printf(out, "each entry reserved:\t%u\n", j->entry_u64s_reserved); prt_printf(out, "nr flush writes:\t%llu\n", j->nr_flush_writes); diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h index 011711e99c8d..c85d01cf4948 100644 --- 
a/fs/bcachefs/journal.h +++ b/fs/bcachefs/journal.h @@ -395,104 +395,6 @@ out: return 0; } -/* journal_preres: */ - -static inline void journal_set_watermark(struct journal *j) -{ - union journal_preres_state s = READ_ONCE(j->prereserved); - unsigned watermark = BCH_WATERMARK_stripe; - - if (fifo_free(&j->pin) < j->pin.size / 4) - watermark = max_t(unsigned, watermark, BCH_WATERMARK_copygc); - if (fifo_free(&j->pin) < j->pin.size / 8) - watermark = max_t(unsigned, watermark, BCH_WATERMARK_reclaim); - - if (s.reserved > s.remaining) - watermark = max_t(unsigned, watermark, BCH_WATERMARK_copygc); - if (!s.remaining) - watermark = max_t(unsigned, watermark, BCH_WATERMARK_reclaim); - - if (watermark == j->watermark) - return; - - swap(watermark, j->watermark); - if (watermark > j->watermark) - journal_wake(j); -} - -static inline void bch2_journal_preres_put(struct journal *j, - struct journal_preres *res) -{ - union journal_preres_state s = { .reserved = res->u64s }; - - if (!res->u64s) - return; - - s.v = atomic64_sub_return(s.v, &j->prereserved.counter); - res->u64s = 0; - - if (unlikely(s.waiting)) { - clear_bit(ilog2((((union journal_preres_state) { .waiting = 1 }).v)), - (unsigned long *) &j->prereserved.v); - closure_wake_up(&j->preres_wait); - } - - if (s.reserved <= s.remaining && j->watermark) - journal_set_watermark(j); -} - -int __bch2_journal_preres_get(struct journal *, - struct journal_preres *, unsigned, unsigned); - -static inline int bch2_journal_preres_get_fast(struct journal *j, - struct journal_preres *res, - unsigned new_u64s, - unsigned flags, - bool set_waiting) -{ - int d = new_u64s - res->u64s; - union journal_preres_state old, new; - u64 v = atomic64_read(&j->prereserved.counter); - enum bch_watermark watermark = flags & BCH_WATERMARK_MASK; - int ret; - - do { - old.v = new.v = v; - ret = 0; - - if (watermark == BCH_WATERMARK_reclaim || - new.reserved + d < new.remaining) { - new.reserved += d; - ret = 1; - } else if (set_waiting && !new.waiting) - new.waiting = true; - else - return 0; - } while ((v = atomic64_cmpxchg(&j->prereserved.counter, - old.v, new.v)) != old.v); - - if (ret) - res->u64s += d; - return ret; -} - -static inline int bch2_journal_preres_get(struct journal *j, - struct journal_preres *res, - unsigned new_u64s, - unsigned flags) -{ - if (new_u64s <= res->u64s) - return 0; - - if (bch2_journal_preres_get_fast(j, res, new_u64s, flags, false)) - return 0; - - if (flags & JOURNAL_RES_GET_NONBLOCK) - return -BCH_ERR_journal_preres_get_blocked; - - return __bch2_journal_preres_get(j, res, new_u64s, flags); -} - /* journal_entry_res: */ void bch2_journal_entry_res_resize(struct journal *, diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c index 9a584aaaa2eb..e63c6eda86af 100644 --- a/fs/bcachefs/journal_reclaim.c +++ b/fs/bcachefs/journal_reclaim.c @@ -50,16 +50,21 @@ unsigned bch2_journal_dev_buckets_available(struct journal *j, return available; } -static void journal_set_remaining(struct journal *j, unsigned u64s_remaining) +static inline void journal_set_watermark(struct journal *j, bool low_on_space) { - union journal_preres_state old, new; - u64 v = atomic64_read(&j->prereserved.counter); + unsigned watermark = BCH_WATERMARK_stripe; - do { - old.v = new.v = v; - new.remaining = u64s_remaining; - } while ((v = atomic64_cmpxchg(&j->prereserved.counter, - old.v, new.v)) != old.v); + if (low_on_space) + watermark = max_t(unsigned, watermark, BCH_WATERMARK_reclaim); + if (fifo_free(&j->pin) < j->pin.size / 4) + watermark = 
max_t(unsigned, watermark, BCH_WATERMARK_reclaim); + + if (watermark == j->watermark) + return; + + swap(watermark, j->watermark); + if (watermark > j->watermark) + journal_wake(j); } static struct journal_space @@ -162,7 +167,6 @@ void bch2_journal_space_available(struct journal *j) struct bch_fs *c = container_of(j, struct bch_fs, journal); struct bch_dev *ca; unsigned clean, clean_ondisk, total; - s64 u64s_remaining = 0; unsigned max_entry_size = min(j->buf[0].buf_size >> 9, j->buf[1].buf_size >> 9); unsigned i, nr_online = 0, nr_devs_want; @@ -222,16 +226,10 @@ void bch2_journal_space_available(struct journal *j) else clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags); - u64s_remaining = (u64) clean << 6; - u64s_remaining -= (u64) total << 3; - u64s_remaining = max(0LL, u64s_remaining); - u64s_remaining /= 4; - u64s_remaining = min_t(u64, u64s_remaining, U32_MAX); + journal_set_watermark(j, clean * 4 <= total); out: j->cur_entry_sectors = !ret ? j->space[journal_space_discarded].next_entry : 0; j->cur_entry_error = ret; - journal_set_remaining(j, u64s_remaining); - journal_set_watermark(j); if (!ret) journal_wake(j); @@ -555,11 +553,6 @@ static u64 journal_seq_to_flush(struct journal *j) /* Try to keep the journal at most half full: */ nr_buckets = ja->nr / 2; - /* And include pre-reservations: */ - nr_buckets += DIV_ROUND_UP(j->prereserved.reserved, - (ca->mi.bucket_size << 6) - - journal_entry_overhead(j)); - nr_buckets = min(nr_buckets, ja->nr); bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr; @@ -638,10 +631,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked) msecs_to_jiffies(c->opts.journal_reclaim_delay))) min_nr = 1; - if (j->prereserved.reserved * 4 > j->prereserved.remaining) - min_nr = 1; - - if (fifo_free(&j->pin) <= 32) + if (j->watermark != BCH_WATERMARK_stripe) min_nr = 1; if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used) @@ -652,8 +642,6 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked) trace_and_count(c, journal_reclaim_start, c, direct, kicked, min_nr, min_key_cache, - j->prereserved.reserved, - j->prereserved.remaining, atomic_read(&c->btree_cache.dirty), c->btree_cache.used, atomic_long_read(&c->btree_key_cache.nr_dirty), diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h index 42504e16acb6..a756b69582e3 100644 --- a/fs/bcachefs/journal_types.h +++ b/fs/bcachefs/journal_types.h @@ -76,14 +76,6 @@ struct journal_res { u64 seq; }; -/* - * For reserving space in the journal prior to getting a reservation on a - * particular journal entry: - */ -struct journal_preres { - unsigned u64s; -}; - union journal_res_state { struct { atomic64_t counter; @@ -104,22 +96,6 @@ union journal_res_state { }; }; -union journal_preres_state { - struct { - atomic64_t counter; - }; - - struct { - u64 v; - }; - - struct { - u64 waiting:1, - reserved:31, - remaining:32; - }; -}; - /* bytes: */ #define JOURNAL_ENTRY_SIZE_MIN (64U << 10) /* 64k */ #define JOURNAL_ENTRY_SIZE_MAX (4U << 20) /* 4M */ @@ -180,8 +156,6 @@ struct journal { union journal_res_state reservations; enum bch_watermark watermark; - union journal_preres_state prereserved; - } __aligned(SMP_CACHE_BYTES); unsigned long flags; diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h index 893304a1f06e..7857671159b4 100644 --- a/fs/bcachefs/trace.h +++ b/fs/bcachefs/trace.h @@ -196,10 +196,9 @@ DEFINE_EVENT(bio, journal_write, TRACE_EVENT(journal_reclaim_start, TP_PROTO(struct bch_fs *c, bool direct, bool kicked, u64 min_nr, u64 
min_key_cache, - u64 prereserved, u64 prereserved_total, u64 btree_cache_dirty, u64 btree_cache_total, u64 btree_key_cache_dirty, u64 btree_key_cache_total), - TP_ARGS(c, direct, kicked, min_nr, min_key_cache, prereserved, prereserved_total, + TP_ARGS(c, direct, kicked, min_nr, min_key_cache, btree_cache_dirty, btree_cache_total, btree_key_cache_dirty, btree_key_cache_total), @@ -209,8 +208,6 @@ TRACE_EVENT(journal_reclaim_start, __field(bool, kicked ) __field(u64, min_nr ) __field(u64, min_key_cache ) - __field(u64, prereserved ) - __field(u64, prereserved_total ) __field(u64, btree_cache_dirty ) __field(u64, btree_cache_total ) __field(u64, btree_key_cache_dirty ) @@ -223,22 +220,18 @@ TRACE_EVENT(journal_reclaim_start, __entry->kicked = kicked; __entry->min_nr = min_nr; __entry->min_key_cache = min_key_cache; - __entry->prereserved = prereserved; - __entry->prereserved_total = prereserved_total; __entry->btree_cache_dirty = btree_cache_dirty; __entry->btree_cache_total = btree_cache_total; __entry->btree_key_cache_dirty = btree_key_cache_dirty; __entry->btree_key_cache_total = btree_key_cache_total; ), - TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu", + TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->direct, __entry->kicked, __entry->min_nr, __entry->min_key_cache, - __entry->prereserved, - __entry->prereserved_total, __entry->btree_cache_dirty, __entry->btree_cache_total, __entry->btree_key_cache_dirty, From 069749688ea4bbaeff0ca3b229b443ea96b03757 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 11 Nov 2023 22:15:59 -0500 Subject: [PATCH 147/299] bcachefs: Fix iterator leak in may_delete_deleted_inode() may_delete_deleted_inode() was returning without exiting a btree iterator, eventually causing propagate_key_to_snaphot_leaves() to go into an infinite loop hitting btree_trans_too_many_iters(). Signed-off-by: Kent Overstreet --- fs/bcachefs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c index def77f2d8802..dab12c14d1ad 100644 --- a/fs/bcachefs/inode.c +++ b/fs/bcachefs/inode.c @@ -1134,7 +1134,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans, * unlinked inodes in the snapshot leaves: */ *need_another_pass = true; - return 0; + goto out; } ret = 1; From b783fc4d1366658200bf759e1010655a9e2e145c Mon Sep 17 00:00:00 2001 From: Daniel J Blueman Date: Sun, 12 Nov 2023 00:38:41 +0000 Subject: [PATCH 148/299] bcachefs: Fix potential sleeping during mount During mount, bcachefs mount option processing may sleep while allocating a string buffer. Fix this by reference counting in order to take the atomic path. Signed-off-by: Daniel J Blueman Signed-off-by: Kent Overstreet --- fs/bcachefs/disk_groups.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/bcachefs/disk_groups.c b/fs/bcachefs/disk_groups.c index 1f334124055b..4d0cb0ccff32 100644 --- a/fs/bcachefs/disk_groups.c +++ b/fs/bcachefs/disk_groups.c @@ -555,6 +555,7 @@ void bch2_target_to_text(struct printbuf *out, struct bch_fs *c, unsigned v) case TARGET_DEV: { struct bch_dev *ca; + out->atomic++; rcu_read_lock(); ca = t.dev < c->sb.nr_devices ? 
rcu_dereference(c->devs[t.dev]) @@ -570,6 +571,7 @@ void bch2_target_to_text(struct printbuf *out, struct bch_fs *c, unsigned v) } rcu_read_unlock(); + out->atomic--; break; } case TARGET_GROUP: From 178c4873fd06c0361d260547ce70fcdc29b74809 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sun, 12 Nov 2023 14:15:35 -0500 Subject: [PATCH 149/299] bcachefs: Fix error path in bch2_mount() This fixes a bug discovered by generic/388 where sb->s_fs_info was NULL while the superblock was still active - the error path was entirely fubar, and was trying to do something unclear and unnecessary. Signed-off-by: Kent Overstreet --- fs/bcachefs/fs.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index 166d8d8abe68..8ef817304e4a 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -1922,10 +1922,7 @@ out: return dget(sb->s_root); err_put_super: - sb->s_fs_info = NULL; - c->vfs_sb = NULL; deactivate_locked_super(sb); - bch2_fs_stop(c); return ERR_PTR(bch2_err_class(ret)); } @@ -1933,11 +1930,8 @@ static void bch2_kill_sb(struct super_block *sb) { struct bch_fs *c = sb->s_fs_info; - if (c) - c->vfs_sb = NULL; generic_shutdown_super(sb); - if (c) - bch2_fs_free(c); + bch2_fs_free(c); } static struct file_system_type bcache_fs_type = { From f42fa17883e73d8509fff5925781d4157db82f00 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sun, 12 Nov 2023 15:47:02 -0500 Subject: [PATCH 150/299] bcachefs: Fix missing transaction commit In may_delete_deleted_inode(), there's a corner case when a snapshot was taken while we had an unlinked inode: we don't want to delete the inode in the internal (shared) snapshot node, since it might have been reattached in a descendant snapshot. Instead we propagate the key to any snapshot leaves it doesn't exist in, so that it can be deleted there if necessary, and then clear the unlinked flag in the internal node. But we forgot to commit after clearing the unlinked flag, causing us to go into an infinite loop. Signed-off-by: Kent Overstreet --- fs/bcachefs/inode.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c index dab12c14d1ad..c7849b0753e7 100644 --- a/fs/bcachefs/inode.c +++ b/fs/bcachefs/inode.c @@ -1169,8 +1169,10 @@ again: */ for_each_btree_key(trans, iter, BTREE_ID_deleted_inodes, POS_MIN, BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k, ret) { - ret = lockrestart_do(trans, may_delete_deleted_inode(trans, &iter, k.k->p, - &need_another_pass)); + ret = commit_do(trans, NULL, NULL, + BTREE_INSERT_NOFAIL| + BTREE_INSERT_LAZY_RW, + may_delete_deleted_inode(trans, &iter, k.k->p, &need_another_pass)); if (ret < 0) break; From 497c57a303590ea69ace23506e182c489e85694d Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sun, 12 Nov 2023 17:02:08 -0500 Subject: [PATCH 151/299] bcachefs: Disable debug log statements The journal read path had some informational log statements preparatory for ZNS support - they're not of interest to users, so we can turn them off.
Signed-off-by: Kent Overstreet --- fs/bcachefs/journal_io.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c index f4bc2cdbfdd7..786a09285509 100644 --- a/fs/bcachefs/journal_io.c +++ b/fs/bcachefs/journal_io.c @@ -1079,6 +1079,12 @@ found: if (ja->bucket_seq[ja->cur_idx] && ja->sectors_free == ca->mi.bucket_size) { +#if 0 + /* + * Debug code for ZNS support, where we (probably) want to be + * correlated where we stopped in the journal to the zone write + * points: + */ bch_err(c, "ja->sectors_free == ca->mi.bucket_size"); bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr); for (i = 0; i < 3; i++) { @@ -1086,6 +1092,7 @@ found: bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]); } +#endif ja->sectors_free = 0; } From 7125063fc6dfb77138b3a100527f3d8f9203ff2a Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 2 Mar 2023 23:52:57 -0500 Subject: [PATCH 152/299] bcachefs: Don't decrease BTREE_ITER_MAX when LOCKDEP=y Running with fewer max btree paths doesn't work anymore when replication is enabled - as we've added e.g. the freespace and bucket gens btrees, we naturally end up needing more btree paths. This is an issue with lockdep, we end up taking more locks than lockdep will track (the MAX_LOCKD_DEPTH constant). But bcachefs as merged does not yet support lockdep anyways, so we can leave that for later. Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_types.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index 6fbd4ef3df6b..60453ba86c4b 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -363,11 +363,7 @@ struct btree_insert_entry { unsigned long ip_allocated; }; -#ifndef CONFIG_LOCKDEP #define BTREE_ITER_MAX 64 -#else -#define BTREE_ITER_MAX 32 -#endif struct btree_trans_commit_hook; typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *); From db18ef1a02bc2cd924f86b2582302f2c2711b67c Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Mon, 13 Nov 2023 21:17:19 -0500 Subject: [PATCH 153/299] bcachefs: Fix bch2_check_nlinks() for snapshots When searching the link table for the matching inode, we were searching for a specific - incorrect - snapshot ID as well, causing us to fail to find the inode. Signed-off-by: Kent Overstreet --- fs/bcachefs/fsck.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c index 9f3e9bd3d767..e0c5cd119acc 100644 --- a/fs/bcachefs/fsck.c +++ b/fs/bcachefs/fsck.c @@ -2220,7 +2220,7 @@ static int nlink_cmp(const void *_l, const void *_r) const struct nlink *l = _l; const struct nlink *r = _r; - return cmp_int(l->inum, r->inum) ?: cmp_int(l->snapshot, r->snapshot); + return cmp_int(l->inum, r->inum); } static void inc_link(struct bch_fs *c, struct snapshots_seen *s, From 62d73dfc44d54c97e0df6b947f0bccf6c4b8030e Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sun, 12 Nov 2023 21:46:52 -0500 Subject: [PATCH 154/299] bcachefs: Fix no_data_io mode checksum check In no_data_io mode, we expect data checksums to be wrong - don't want to spew the log with them. 
Signed-off-by: Kent Overstreet --- fs/bcachefs/io_write.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c index f02b3f7d26a0..d704a8f829c8 100644 --- a/fs/bcachefs/io_write.c +++ b/fs/bcachefs/io_write.c @@ -795,7 +795,7 @@ static int bch2_write_decrypt(struct bch_write_op *op) * checksum: */ csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio); - if (bch2_crc_cmp(op->crc.csum, csum)) + if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io) return -EIO; ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio); From 61b85cb0d773115d9a4b20c3e67286844cf73f34 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Tue, 14 Nov 2023 18:52:22 -0500 Subject: [PATCH 155/299] bcachefs: six locks: Fix lost wakeup In percpu reader mode, trylock() for read had a lost wakeup: on failure to get the lock, we may have caused a writer to fail to get the lock, because we temporarily elevated the reader count. We need to check for waiters after decrementing the read count - not before. Signed-off-by: Kent Overstreet --- fs/bcachefs/six.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c index b775cf0fb7cb..97790445e67a 100644 --- a/fs/bcachefs/six.c +++ b/fs/bcachefs/six.c @@ -163,8 +163,11 @@ static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type, this_cpu_sub(*lock->readers, !ret); preempt_enable(); - if (!ret && (old & SIX_LOCK_WAITING_write)) - ret = -1 - SIX_LOCK_write; + if (!ret) { + smp_mb(); + if (atomic_read(&lock->state) & SIX_LOCK_WAITING_write) + ret = -1 - SIX_LOCK_write; + } } else if (type == SIX_LOCK_write && lock->readers) { if (try) { atomic_add(SIX_LOCK_HELD_write, &lock->state); From 00a614fc3527ba8846e6d823013bcf724e9a68f6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 27 Oct 2023 17:26:23 +0200 Subject: [PATCH 156/299] accel/ivpu: avoid build failure with CONFIG_PM=n The usage count of struct dev_pm_info is an implementation detail that is only available if CONFIG_PM is enabled, so printing it in a debug message causes a build failure in configurations without PM: In file included from include/linux/device.h:15, from include/linux/pci.h:37, from drivers/accel/ivpu/ivpu_pm.c:8: drivers/accel/ivpu/ivpu_pm.c: In function 'ivpu_rpm_get_if_active': drivers/accel/ivpu/ivpu_pm.c:254:51: error: 'struct dev_pm_info' has no member named 'usage_count' 254 | atomic_read(&vdev->drm.dev->power.usage_count)); | ^ include/linux/dev_printk.h:129:48: note: in definition of macro 'dev_printk' 129 | _dev_printk(level, dev, fmt, ##__VA_ARGS__); \ | ^~~~~~~~~~~ drivers/accel/ivpu/ivpu_drv.h:75:17: note: in expansion of macro 'dev_dbg' 75 | dev_dbg((vdev)->drm.dev, "[%s] " fmt, #type, ##args); \ | ^~~~~~~ drivers/accel/ivpu/ivpu_pm.c:253:9: note: in expansion of macro 'ivpu_dbg' 253 | ivpu_dbg(vdev, RPM, "rpm_get_if_active count %d\n", | ^~~~~~~~ The print message does not seem essential, so the easiest workaround is to just remove it. 
Fixes: c39dc15191c4 ("accel/ivpu: Read clock rate only if device is up") Signed-off-by: Arnd Bergmann Reviewed-by: Stanislaw Gruszka Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231027152633.528490-1-arnd@kernel.org (cherry picked from commit 1470acbef122c7e2e588f6346ce459c26d0568a2) Signed-off-by: Jacek Lawrynowicz --- drivers/accel/ivpu/ivpu_pm.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index 0ace218783c8..e9b16cbc26f4 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -250,9 +250,6 @@ int ivpu_rpm_get_if_active(struct ivpu_device *vdev) { int ret; - ivpu_dbg(vdev, RPM, "rpm_get_if_active count %d\n", - atomic_read(&vdev->drm.dev->power.usage_count)); - ret = pm_runtime_get_if_active(vdev->drm.dev, false); drm_WARN_ON(&vdev->drm, ret < 0); From 9fadd4509966e375952f31ae954ab5eae76f90fe Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 2 Nov 2023 12:52:18 -0700 Subject: [PATCH 157/299] MAINTAINERS: Remove stale entry for SBL platform driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Maurice is no longer with Intel and his e-mail address is no longer active. Remove the stale entry from Slim boot loader section. Signed-off-by: Jithu Joseph Link: https://lore.kernel.org/r/20231102195218.143440-1-jithu.joseph@intel.com Signed-off-by: Ilpo Järvinen --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 97f51d5ec1cf..0666ddb65552 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11025,7 +11025,6 @@ F: drivers/net/wireless/intel/iwlwifi/ INTEL WMI SLIM BOOTLOADER (SBL) FIRMWARE UPDATE DRIVER M: Jithu Joseph -R: Maurice Ma S: Maintained W: https://slimbootloader.github.io/security/firmware-update.html F: drivers/platform/x86/intel/wmi/sbl-fw-update.c From 7a3c36eef9a5d13b16aa954da54224c9c6bed339 Mon Sep 17 00:00:00 2001 From: Stuart Hayhurst Date: Tue, 14 Nov 2023 11:38:08 +0000 Subject: [PATCH 158/299] platform/x86: ideapad-laptop: Set max_brightness before using it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit max_brightness is used in ideapad_kbd_bl_brightness_get() before it's set, causing ideapad_kbd_bl_brightness_get() to return -EINVAL sometimes. 
Fixes: ecaa1867b524 ("platform/x86: ideapad-laptop: Add support for keyboard backlights using KBLC ACPI symbol") Signed-off-by: Stuart Hayhurst Cc: stable@vger.kernel.org Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231114114055.6220-2-stuart.a.hayhurst@gmail.com Signed-off-by: Ilpo Järvinen --- drivers/platform/x86/ideapad-laptop.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index ac037540acfc..88eefccb6ed2 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -1425,18 +1425,17 @@ static int ideapad_kbd_bl_init(struct ideapad_private *priv) if (WARN_ON(priv->kbd_bl.initialized)) return -EEXIST; - brightness = ideapad_kbd_bl_brightness_get(priv); - if (brightness < 0) - return brightness; - - priv->kbd_bl.last_brightness = brightness; - if (ideapad_kbd_bl_check_tristate(priv->kbd_bl.type)) { priv->kbd_bl.led.max_brightness = 2; } else { priv->kbd_bl.led.max_brightness = 1; } + brightness = ideapad_kbd_bl_brightness_get(priv); + if (brightness < 0) + return brightness; + + priv->kbd_bl.last_brightness = brightness; priv->kbd_bl.led.name = "platform::" LED_FUNCTION_KBD_BACKLIGHT; priv->kbd_bl.led.brightness_get = ideapad_kbd_bl_led_cdev_brightness_get; priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set; From c5dbf04160005e07e8ca7232a7faa77ab1547ae0 Mon Sep 17 00:00:00 2001 From: Harshit Mogalapalli Date: Mon, 13 Nov 2023 12:07:37 -0800 Subject: [PATCH 159/299] platform/x86: hp-bioscfg: Simplify return check in hp_add_other_attributes() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All cases in the switch statement share the same goto on error; move the return check out of the switch. This is a cleanup. Signed-off-by: Harshit Mogalapalli Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231113200742.3593548-1-harshit.m.mogalapalli@oracle.com Signed-off-by: Ilpo Järvinen --- drivers/platform/x86/hp/hp-bioscfg/bioscfg.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c index 5798b49ddaba..10676e1abc28 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c +++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c @@ -630,21 +630,19 @@ static int hp_add_other_attributes(int attr_type) switch (attr_type) { case HPWMI_SECURE_PLATFORM_TYPE: ret = hp_populate_secure_platform_data(attr_name_kobj); - if (ret) - goto err_other_attr_init; break; case HPWMI_SURE_START_TYPE: ret = hp_populate_sure_start_data(attr_name_kobj); - if (ret) - goto err_other_attr_init; break; default: ret = -EINVAL; - goto err_other_attr_init; } + if (ret) + goto err_other_attr_init; + mutex_unlock(&bioscfg_drv.mutex); return 0; From 5736aa9537c9b8927dec32d3d47c8c31fe560f62 Mon Sep 17 00:00:00 2001 From: Harshit Mogalapalli Date: Mon, 13 Nov 2023 12:07:38 -0800 Subject: [PATCH 160/299] platform/x86: hp-bioscfg: move mutex_lock() down in hp_add_other_attributes() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit attr_name_kobj's memory allocation is done with mutex_lock() held, which is not needed. Move the allocation outside of mutex_lock() so the unlock is not needed when the allocation fails.
Suggested-by: Ilpo Järvinen Signed-off-by: Harshit Mogalapalli Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231113200742.3593548-2-harshit.m.mogalapalli@oracle.com Signed-off-by: Ilpo Järvinen --- drivers/platform/x86/hp/hp-bioscfg/bioscfg.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c index 10676e1abc28..a3599498c4e8 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c +++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c @@ -592,13 +592,11 @@ static int hp_add_other_attributes(int attr_type) int ret; char *attr_name; - mutex_lock(&bioscfg_drv.mutex); - attr_name_kobj = kzalloc(sizeof(*attr_name_kobj), GFP_KERNEL); - if (!attr_name_kobj) { - ret = -ENOMEM; - goto err_other_attr_init; - } + if (!attr_name_kobj) + return -ENOMEM; + + mutex_lock(&bioscfg_drv.mutex); /* Check if attribute type is supported */ switch (attr_type) { From 9e88b493157a9901fa498f23cc3c9ab82b43ce83 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 15 Nov 2023 13:36:25 +0100 Subject: [PATCH 161/299] ALSA: hda: i915: Always handle -EPROBE_DEFER MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It turns out that even if the comment says that the driver can load fine, it's not really the case and no codecs are detected. Specifically for -EPROBE_DEFER, always fail the probe. This fixes a regression when HDA-intel is loaded before i915. Reported-by: Ville Syrjälä Closes: https://lore.kernel.org/r/ZVNUxZzCGcxQzqJX@intel.com Signed-off-by: Maarten Lankhorst Tested-by: Kai Vehmanen Fixes: e6d0c13e9f46 ("ALSA: hda: i915: Remove extra argument from snd_hdac_i915_init") Link: https://gitlab.freedesktop.org/drm/intel/-/issues/9671 Link: https://lore.kernel.org/r/20231115123625.74286-1-maarten.lankhorst@linux.intel.com Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_intel.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index e871afeeb383..e79508002bb1 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2141,6 +2141,9 @@ static int azx_probe(struct pci_dev *pci, if (chip->driver_caps & AZX_DCAPS_I915_COMPONENT) { err = snd_hdac_i915_init(azx_bus(chip)); if (err < 0) { + if (err == -EPROBE_DEFER) + goto out_free; + /* if the controller is bound only with HDMI/DP * (for HSW and BDW), we need to abort the probe; * for other chips, still continue probing as other From f40f939917b2b4cbf18450096c0ce1c58ed59fae Mon Sep 17 00:00:00 2001 From: Harshit Mogalapalli Date: Mon, 13 Nov 2023 12:07:39 -0800 Subject: [PATCH 162/299] platform/x86: hp-bioscfg: Fix error handling in hp_add_other_attributes() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 'attr_name_kobj' is allocated using kzalloc, but on all the error paths it is not freed, hence we have a memory leak. Fix the error path before kobject_init_and_add() by adding kfree(). kobject_put() must always be called after passing the object to kobject_init_and_add(). Only the error path which is immediately next to kobject_init_and_add() calls kobject_put() and not any other error path after it. Fix the error handling after kobject_init_and_add() by moving the kobject_put() into the goto label err_other_attr_init that is already used by all the error paths after kobject_init_and_add().
Fixes: a34fc329b189 ("platform/x86: hp-bioscfg: bioscfg") Cc: stable@vger.kernel.org # 6.6.x: c5dbf0416000: platform/x86: hp-bioscfg: Simplify return check in hp_add_other_attributes() Cc: stable@vger.kernel.org # 6.6.x: 5736aa9537c9: platform/x86: hp-bioscfg: move mutex_lock() down in hp_add_other_attributes() Reported-by: kernel test robot Reported-by: Dan Carpenter Closes: https://lore.kernel.org/r/202309201412.on0VXJGo-lkp@intel.com/ Signed-off-by: Harshit Mogalapalli [ij: Added the stable dep tags] Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231113200742.3593548-3-harshit.m.mogalapalli@oracle.com Signed-off-by: Ilpo Järvinen --- drivers/platform/x86/hp/hp-bioscfg/bioscfg.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c index a3599498c4e8..6ddca857cc4d 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c +++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c @@ -613,14 +613,14 @@ static int hp_add_other_attributes(int attr_type) default: pr_err("Error: Unknown attr_type: %d\n", attr_type); ret = -EINVAL; - goto err_other_attr_init; + kfree(attr_name_kobj); + goto unlock_drv_mutex; } ret = kobject_init_and_add(attr_name_kobj, &attr_name_ktype, NULL, "%s", attr_name); if (ret) { pr_err("Error encountered [%d]\n", ret); - kobject_put(attr_name_kobj); goto err_other_attr_init; } @@ -645,6 +645,8 @@ static int hp_add_other_attributes(int attr_type) return 0; err_other_attr_init: + kobject_put(attr_name_kobj); +unlock_drv_mutex: mutex_unlock(&bioscfg_drv.mutex); kfree(obj); return ret; From 92c47597db7d8fb500a4b04ebd457ec7360279cc Mon Sep 17 00:00:00 2001 From: Harshit Mogalapalli Date: Mon, 13 Nov 2023 12:07:40 -0800 Subject: [PATCH 163/299] platform/x86: hp-bioscfg: Remove unused obj in hp_add_other_attributes() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit acpi_object *obj is unused in this function, so delete it, and also delete an unnecessary kfree(obj); Signed-off-by: Harshit Mogalapalli Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231113200742.3593548-4-harshit.m.mogalapalli@oracle.com Signed-off-by: Ilpo Järvinen --- drivers/platform/x86/hp/hp-bioscfg/bioscfg.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c index 6ddca857cc4d..8c9f4f3227fc 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c +++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c @@ -588,7 +588,6 @@ static void release_attributes_data(void) static int hp_add_other_attributes(int attr_type) { struct kobject *attr_name_kobj; - union acpi_object *obj = NULL; int ret; char *attr_name; @@ -648,7 +647,6 @@ err_other_attr_init: kobject_put(attr_name_kobj); unlock_drv_mutex: mutex_unlock(&bioscfg_drv.mutex); - kfree(obj); return ret; } From a0d45c3f596be53c1bd8822a1984532d14fdcea9 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 14 Nov 2023 09:55:50 -0700 Subject: [PATCH 164/299] io_uring/fdinfo: remove need for sqpoll lock for thread/pid retrieval A previous commit added a trylock for getting the SQPOLL thread info via fdinfo, but this introduced a regression where we often fail to get it if the thread is busy. For that case, we end up not printing the current CPU and PID info.
Rather than rely on this lock, just print the pid we already stored in the io_sq_data struct, and ensure we update the current CPU every time we've slept or potentially rescheduled. The latter won't potentially be 100% accurate, but that wasn't the case before either as the task can get migrated at any time unless it has been pinned at creation time. We retain keeping the io_sq_data dereference inside the ctx->uring_lock, as it has always been, as destruction of the thread and data happen below that. We could make this RCU safe, but there's little point in doing that. With this, we always print the last valid information we had, rather than have spurious outputs with missing information. Fixes: 7644b1a1c9a7 ("io_uring/fdinfo: lock SQ thread while retrieving thread cpu/pid") Signed-off-by: Jens Axboe --- io_uring/fdinfo.c | 9 ++------- io_uring/sqpoll.c | 12 ++++++++++-- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c index f04a43044d91..976e9500f651 100644 --- a/io_uring/fdinfo.c +++ b/io_uring/fdinfo.c @@ -145,13 +145,8 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f) if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) { struct io_sq_data *sq = ctx->sq_data; - if (mutex_trylock(&sq->lock)) { - if (sq->thread) { - sq_pid = task_pid_nr(sq->thread); - sq_cpu = task_cpu(sq->thread); - } - mutex_unlock(&sq->lock); - } + sq_pid = sq->task_pid; + sq_cpu = sq->sq_cpu; } seq_printf(m, "SqThread:\t%d\n", sq_pid); diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index bd6c2c7959a5..65b5dbe3c850 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -214,6 +214,7 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd) did_sig = get_signal(&ksig); cond_resched(); mutex_lock(&sqd->lock); + sqd->sq_cpu = raw_smp_processor_id(); } return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); } @@ -229,10 +230,15 @@ static int io_sq_thread(void *data) snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid); set_task_comm(current, buf); - if (sqd->sq_cpu != -1) + /* reset to our pid after we've set task_comm, for fdinfo */ + sqd->task_pid = current->pid; + + if (sqd->sq_cpu != -1) { set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu)); - else + } else { set_cpus_allowed_ptr(current, cpu_online_mask); + sqd->sq_cpu = raw_smp_processor_id(); + } mutex_lock(&sqd->lock); while (1) { @@ -261,6 +267,7 @@ static int io_sq_thread(void *data) mutex_unlock(&sqd->lock); cond_resched(); mutex_lock(&sqd->lock); + sqd->sq_cpu = raw_smp_processor_id(); } continue; } @@ -294,6 +301,7 @@ static int io_sq_thread(void *data) mutex_unlock(&sqd->lock); schedule(); mutex_lock(&sqd->lock); + sqd->sq_cpu = raw_smp_processor_id(); } list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) atomic_andnot(IORING_SQ_NEED_WAKEUP, From 430143b0d3611f4a9c8434319e5e504244749e79 Mon Sep 17 00:00:00 2001 From: Brenton Simpson Date: Tue, 14 Nov 2023 23:38:59 +0000 Subject: [PATCH 165/299] drm: panel-orientation-quirks: Add quirk for Lenovo Legion Go MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Legion Go has a 2560x1600 portrait screen, with the native "up" facing the right controller (90° CW from the rest of the device). 
Signed-off-by: Brenton Simpson Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20231114233859.274189-1-appsforartists@google.com --- drivers/gpu/drm/drm_panel_orientation_quirks.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index d5c15292ae93..3d92f66e550c 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -336,6 +336,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* Lenovo Legion Go 8APU1 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8APU1"), + }, + .driver_data = (void *)&lcd1600x2560_leftside_up, }, { /* Lenovo Yoga Book X90F / X90L */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), From ae1aadb1eb8d3cbc52e42bee71d67bd4a71f9f07 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 16 Nov 2023 00:39:33 +1000 Subject: [PATCH 166/299] nouveau: don't fail driver load if no display hw present. If we get back ENODEV don't fail load. There are nvidia devices that don't have display blocks and the driver should work on those. Fixes: 15740541e8f0 ("drm/nouveau/devinit/tu102-: prepare for GSP-RM") Link: https://gitlab.freedesktop.org/drm/nouveau/-/issues/270 Signed-off-by: Dave Airlie Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231115143933.261287-1-airlied@gmail.com --- drivers/gpu/drm/nouveau/nouveau_display.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index d8c92521226d..f28f9a857458 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -726,6 +726,11 @@ nouveau_display_create(struct drm_device *dev) if (nouveau_modeset != 2) { ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0, &disp->disp); + /* no display hw */ + if (ret == -ENODEV) { + ret = 0; + goto disp_create_err; + } if (!ret && (disp->disp.outp_mask || drm->vbios.dcb.entries)) { nouveau_display_create_properties(dev); From 61cbc08fdb04fd445458b0f4cba7e6929afdfaef Mon Sep 17 00:00:00 2001 From: Stefan Binding Date: Wed, 15 Nov 2023 16:21:15 +0000 Subject: [PATCH 167/299] ALSA: hda/realtek: Add quirks for ASUS 2024 Zenbooks These ASUS Zenbook laptops use Realtek HDA codec combined with 2xCS35L41 Amplifiers using SPI or I2C with External Boost or Internal Boost. 
Signed-off-by: Stefan Binding Cc: Link: https://lore.kernel.org/r/20231115162116.494968-2-sbinding@opensource.cirrus.com Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 3c85b8247c11..a1e124370283 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -9947,13 +9947,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B), SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1043, 0x1b93, "ASUS G614JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x1c03, "ASUS UM3406HA", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x1c33, "ASUS UX5304MA", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS), SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JI", ALC285_FIXUP_ASUS_HEADSET_MIC), From 5d639b60971f003d3a9b2b31f8ec73b0718b5d57 Mon Sep 17 00:00:00 2001 From: Stefan Binding Date: Wed, 15 Nov 2023 16:21:16 +0000 Subject: [PATCH 168/299] ALSA: hda/realtek: Add quirks for HP Laptops These HP laptops use Realtek HDA codec combined with 2 or 4 CS35L41 Amplifiers using SPI with Internal Boost. 
Signed-off-by: Stefan Binding Cc: Link: https://lore.kernel.org/r/20231115162116.494968-3-sbinding@opensource.cirrus.com Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index a1e124370283..5618b1d9bfd1 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -9902,6 +9902,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), From 85c2ceaafbd306814a3a4740bf4d95ac26a8b36a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 6 Nov 2023 17:07:40 +0300 Subject: [PATCH 169/299] mm/damon/sysfs: eliminate potential uninitialized variable warning The "err" variable is not initialized if damon_target_has_pid(ctx) is false and sys_target->regions->nr is zero. Link: https://lkml.kernel.org/r/739e6aaf-a634-4e33-98a8-16546379ec9f@moroto.mountain Fixes: 0bcd216c4741 ("mm/damon/sysfs: update monitoring target regions for online input commit") Signed-off-by: Dan Carpenter Reviewed-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index e27846708b5a..1dfa96d4de99 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1172,7 +1172,7 @@ static int damon_sysfs_update_target(struct damon_target *target, struct damon_ctx *ctx, struct damon_sysfs_target *sys_target) { - int err; + int err = 0; if (damon_target_has_pid(ctx)) { err = damon_sysfs_update_target_pid(target, sys_target->pid); From 019b277b680f5b95135c042c78dd79318d8f9e3c Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Fri, 3 Nov 2023 23:23:41 +0500 Subject: [PATCH 170/299] selftests: mm: skip whole test instead of failure Some architectures don't support userfaultfd. Skip running the whole test on them instead of registering the failure. 
Link: https://lkml.kernel.org/r/20231103182343.2874015-1-usama.anjum@collabora.com Fixes: 46fd75d4a3c9 ("selftests: mm: add pagemap ioctl tests") Reported-by: Ryan Roberts Closes: https://lore.kernel.org/all/f8463381-2697-49e9-9460-9dc73452830d@arm.com Signed-off-by: Muhammad Usama Anjum Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/pagemap_ioctl.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c index 0161fb49fc6e..f8685a2ea07e 100644 --- a/tools/testing/selftests/mm/pagemap_ioctl.c +++ b/tools/testing/selftests/mm/pagemap_ioctl.c @@ -94,19 +94,19 @@ int init_uffd(void) uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY); if (uffd == -1) - ksft_exit_fail_msg("uffd syscall failed\n"); + return uffd; uffdio_api.api = UFFD_API; uffdio_api.features = UFFD_FEATURE_WP_UNPOPULATED | UFFD_FEATURE_WP_ASYNC | UFFD_FEATURE_WP_HUGETLBFS_SHMEM; if (ioctl(uffd, UFFDIO_API, &uffdio_api)) - ksft_exit_fail_msg("UFFDIO_API\n"); + return -1; if (!(uffdio_api.api & UFFDIO_REGISTER_MODE_WP) || !(uffdio_api.features & UFFD_FEATURE_WP_UNPOPULATED) || !(uffdio_api.features & UFFD_FEATURE_WP_ASYNC) || !(uffdio_api.features & UFFD_FEATURE_WP_HUGETLBFS_SHMEM)) - ksft_exit_fail_msg("UFFDIO_API error %llu\n", uffdio_api.api); + return -1; return 0; } @@ -1479,6 +1479,10 @@ int main(void) struct stat sbuf; ksft_print_header(); + + if (init_uffd()) + return ksft_exit_pass(); + ksft_set_plan(115); page_size = getpagesize(); @@ -1488,9 +1492,6 @@ int main(void) if (pagemap_fd < 0) return -EINVAL; - if (init_uffd()) - ksft_exit_fail_msg("uffd init failed\n"); - /* 1. Sanity testing */ sanity_tests_sd(); From 9297e5360c3bd777f95d5146dbeda7fb9ba4273a Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Fri, 3 Nov 2023 23:23:42 +0500 Subject: [PATCH 171/299] selftests: mm: fix some build warnings Fix build warnings: pagemap_ioctl.c:1154:38: warning: format `%s' expects a matching `char *' argument [-Wformat=] pagemap_ioctl.c:1162:51: warning: format `%ld' expects argument of type `long int', but argument 2 has type `int' [-Wformat=] pagemap_ioctl.c:1192:51: warning: format `%ld' expects argument of type `long int', but argument 2 has type `int' [-Wformat=] pagemap_ioctl.c:1600:51: warning: format `%ld' expects argument of type `long int', but argument 2 has type `int' [-Wformat=] pagemap_ioctl.c:1628:51: warning: format `%ld' expects argument of type `long int', but argument 2 has type `int' [-Wformat=] Link: https://lkml.kernel.org/r/20231103182343.2874015-2-usama.anjum@collabora.com Fixes: 46fd75d4a3c9 ("selftests: mm: add pagemap ioctl tests") Signed-off-by: Muhammad Usama Anjum Cc: Shuah Khan Cc: Ryan Roberts Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/pagemap_ioctl.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c index f8685a2ea07e..befab43719ba 100644 --- a/tools/testing/selftests/mm/pagemap_ioctl.c +++ b/tools/testing/selftests/mm/pagemap_ioctl.c @@ -1151,7 +1151,7 @@ int sanity_tests(void) /* 9. 
Memory mapped file */ fd = open(__FILE__, O_RDONLY); if (fd < 0) - ksft_exit_fail_msg("%s Memory mapped file\n"); + ksft_exit_fail_msg("%s Memory mapped file\n", __func__); ret = stat(__FILE__, &sbuf); if (ret < 0) @@ -1159,7 +1159,7 @@ int sanity_tests(void) fmem = mmap(NULL, sbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0); if (fmem == MAP_FAILED) - ksft_exit_fail_msg("error nomem %ld %s\n", errno, strerror(errno)); + ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno)); tmp_buf = malloc(sbuf.st_size); memcpy(tmp_buf, fmem, sbuf.st_size); @@ -1189,7 +1189,7 @@ int sanity_tests(void) fmem = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); if (fmem == MAP_FAILED) - ksft_exit_fail_msg("error nomem %ld %s\n", errno, strerror(errno)); + ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno)); wp_init(fmem, buf_size); wp_addr_range(fmem, buf_size); @@ -1596,7 +1596,7 @@ int main(void) fmem = mmap(NULL, sbuf.st_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); if (fmem == MAP_FAILED) - ksft_exit_fail_msg("error nomem %ld %s\n", errno, strerror(errno)); + ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno)); wp_init(fmem, sbuf.st_size); wp_addr_range(fmem, sbuf.st_size); @@ -1624,7 +1624,7 @@ int main(void) fmem = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); if (fmem == MAP_FAILED) - ksft_exit_fail_msg("error nomem %ld %s\n", errno, strerror(errno)); + ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno)); wp_init(fmem, buf_size); wp_addr_range(fmem, buf_size); From dd9b35efd719be242e227f9eebad1e50ea5c914f Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Fri, 3 Nov 2023 10:33:59 -0700 Subject: [PATCH 172/299] selftests/mm: restore number of hugepages The test mm `hugetlb_fault_after_madv` selftest needs one and only one huge page to run, thus it sets `/proc/sys/vm/nr_hugepages` to 1. The problem is that further tests require the previous number of hugepages allocated in order to succeed. Save the number of huge pages before changing it, and restore it once the test finishes, so, further tests could run successfully. Link: https://lkml.kernel.org/r/20231103173400.1608403-1-leitao@debian.org Fixes: 116d57303a05 ("selftests/mm: add a new test for madv and hugetlb") Signed-off-by: Breno Leitao Reported-by: Ryan Roberts Closes: https://lore.kernel.org/all/662df57e-47f1-4c15-9b84-f2f2d587fc5c@arm.com/ Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/run_vmtests.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index cc16f6ca8533..00757445278e 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -223,9 +223,12 @@ CATEGORY="hugetlb" run_test ./hugepage-mremap CATEGORY="hugetlb" run_test ./hugepage-vmemmap CATEGORY="hugetlb" run_test ./hugetlb-madvise +nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages) # For this test, we need one and just one huge page echo 1 > /proc/sys/vm/nr_hugepages CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv +# Restore the previous number of huge pages, since further tests rely on it +echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages if test_selected "hugetlb"; then echo "NOTE: These hugetlb tests provide minimal coverage. 
Use" From edf14544324dd036183fafe372fe5709708bdddd Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Fri, 3 Nov 2023 10:34:00 -0700 Subject: [PATCH 173/299] selftests/mm: add hugetlb_fault_after_madv to .gitignore commit 116d57303a05 ("selftests/mm: add a new test for madv and hugetlb") added a new test case, but, it didn't add the binary name in tools/testing/selftests/mm/.gitignore. Add hugetlb_fault_after_madv to tools/testing/selftests/mm/.gitignore. Link: https://lkml.kernel.org/r/20231103173400.1608403-2-leitao@debian.org Fixes: 116d57303a05 ("selftests/mm: add a new test for madv and hugetlb") Signed-off-by: Breno Leitao Reported-by: Ryan Roberts Closes: https://lore.kernel.org/all/662df57e-47f1-4c15-9b84-f2f2d587fc5c@arm.com/ Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore index cc920c79ff1c..4ff10ea61461 100644 --- a/tools/testing/selftests/mm/.gitignore +++ b/tools/testing/selftests/mm/.gitignore @@ -45,3 +45,4 @@ mdwe_test gup_longterm mkdirty va_high_addr_switch +hugetlb_fault_after_madv From a48d5bdc877b85201e42cef9c2fdf5378164c23a Mon Sep 17 00:00:00 2001 From: Stefan Roesch Date: Mon, 6 Nov 2023 10:19:18 -0800 Subject: [PATCH 174/299] mm: fix for negative counter: nr_file_hugepages While qualifiying the 6.4 release, the following warning was detected in messages: vmstat_refresh: nr_file_hugepages -15664 The warning is caused by the incorrect updating of the NR_FILE_THPS counter in the function split_huge_page_to_list. The if case is checking for folio_test_swapbacked, but the else case is missing the check for folio_test_pmd_mappable. The other functions that manipulate the counter like __filemap_add_folio and filemap_unaccount_folio have the corresponding check. I have a test case, which reproduces the problem. It can be found here: https://github.com/sroeschus/testcase/blob/main/vmstat_refresh/madv.c The test case reproduces on an XFS filesystem. Running the same test case on a BTRFS filesystem does not reproduce the problem. AFAIK version 6.1 until 6.6 are affected by this problem. 
[akpm@linux-foundation.org: whitespace fix] [shr@devkernel.io: test for folio_test_pmd_mappable()] Link: https://lkml.kernel.org/r/20231108171517.2436103-1-shr@devkernel.io Link: https://lkml.kernel.org/r/20231106181918.1091043-1-shr@devkernel.io Signed-off-by: Stefan Roesch Co-debugged-by: Johannes Weiner Acked-by: Johannes Weiner Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Reviewed-by: Yang Shi Cc: Rik van Riel Cc: Signed-off-by: Andrew Morton --- mm/huge_memory.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index f31f02472396..4f542444a91f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2769,13 +2769,15 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) int nr = folio_nr_pages(folio); xas_split(&xas, folio, folio_order(folio)); - if (folio_test_swapbacked(folio)) { - __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, - -nr); - } else { - __lruvec_stat_mod_folio(folio, NR_FILE_THPS, - -nr); - filemap_nr_thps_dec(mapping); + if (folio_test_pmd_mappable(folio)) { + if (folio_test_swapbacked(folio)) { + __lruvec_stat_mod_folio(folio, + NR_SHMEM_THPS, -nr); + } else { + __lruvec_stat_mod_folio(folio, + NR_FILE_THPS, -nr); + filemap_nr_thps_dec(mapping); + } } } From b4936b544b08ed44949055b92bd25f77759ebafc Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Nov 2023 23:34:06 +0000 Subject: [PATCH 175/299] mm/damon/sysfs: check error from damon_sysfs_update_target() Patch series "mm/damon/sysfs: fix unhandled return values". Some of DAMON sysfs interface code is not handling return values from some functions. As a result, confusing user input handling or NULL-dereference is possible. Check those properly. This patch (of 3): damon_sysfs_update_target() returns error code for failures, but its caller, damon_sysfs_set_targets() is ignoring that. The update function seems making no critical change in case of such failures, but the behavior will look like DAMON sysfs is silently ignoring or only partially accepting the user input. Fix it. Link: https://lkml.kernel.org/r/20231106233408.51159-1-sj@kernel.org Link: https://lkml.kernel.org/r/20231106233408.51159-2-sj@kernel.org Fixes: 19467a950b49 ("mm/damon/sysfs: remove requested targets when online-commit inputs") Signed-off-by: SeongJae Park Cc: [5.19+] Signed-off-by: Andrew Morton --- mm/damon/sysfs.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index 1dfa96d4de99..7472404456aa 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1203,8 +1203,10 @@ static int damon_sysfs_set_targets(struct damon_ctx *ctx, damon_for_each_target_safe(t, next, ctx) { if (i < sysfs_targets->nr) { - damon_sysfs_update_target(t, ctx, + err = damon_sysfs_update_target(t, ctx, sysfs_targets->targets_arr[i]); + if (err) + return err; } else { if (damon_target_has_pid(ctx)) put_pid(t->pid); From 84055688b6bc075c92a88e2d6c3ad26ab93919f9 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Nov 2023 23:34:07 +0000 Subject: [PATCH 176/299] mm/damon/sysfs-schemes: handle tried regions sysfs directory allocation failure DAMOS tried regions sysfs directory allocation function (damon_sysfs_scheme_regions_alloc()) is not handling the memory allocation failure. In the case, the code will dereference NULL pointer. Handle the failure to avoid such invalid access. 
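In short, the allocation path gains the usual NULL check before any field of the new object is touched:

	struct damon_sysfs_scheme_regions *regions =
			kmalloc(sizeof(*regions), GFP_KERNEL);

	/* kmalloc() may fail; return NULL so the caller can bail out instead
	 * of initializing fields through a NULL pointer.
	 */
	if (!regions)
		return NULL;
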
Link: https://lkml.kernel.org/r/20231106233408.51159-3-sj@kernel.org Fixes: 9277d0367ba1 ("mm/damon/sysfs-schemes: implement scheme region directory") Signed-off-by: SeongJae Park Cc: [6.2+] Signed-off-by: Andrew Morton --- mm/damon/sysfs-schemes.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 45bd0fd4a8b1..7413cb35c5a9 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -162,6 +162,9 @@ damon_sysfs_scheme_regions_alloc(void) struct damon_sysfs_scheme_regions *regions = kmalloc(sizeof(*regions), GFP_KERNEL); + if (!regions) + return NULL; + regions->kobj = (struct kobject){}; INIT_LIST_HEAD(®ions->regions_list); regions->nr_regions = 0; From ae636ae2bbfd9279f5681dbf320d1da817e52b68 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Nov 2023 23:34:08 +0000 Subject: [PATCH 177/299] mm/damon/sysfs-schemes: handle tried region directory allocation failure DAMON sysfs interface's before_damos_apply callback (damon_sysfs_before_damos_apply()), which creates the DAMOS tried regions for each DAMOS action applied region, is not handling the allocation failure for the sysfs directory data. As a result, NULL pointer derefeence is possible. Fix it by handling the case. Link: https://lkml.kernel.org/r/20231106233408.51159-4-sj@kernel.org Fixes: f1d13cacabe1 ("mm/damon/sysfs: implement DAMOS tried regions update command") Signed-off-by: SeongJae Park Cc: [6.2+] Signed-off-by: Andrew Morton --- mm/damon/sysfs-schemes.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 7413cb35c5a9..be667236b8e6 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -1826,6 +1826,8 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx, return 0; region = damon_sysfs_scheme_region_alloc(r); + if (!region) + return 0; list_add_tail(®ion->list, &sysfs_regions->regions_list); sysfs_regions->nr_regions++; if (kobject_init_and_add(®ion->kobj, From 24948e3b7b12e0031a6edb4f49bbb9fb2ad1e4e9 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Tue, 7 Nov 2023 09:18:02 -0800 Subject: [PATCH 178/299] mm: kmem: drop __GFP_NOFAIL when allocating objcg vectors Objcg vectors attached to slab pages to store slab object ownership information are allocated using gfp flags for the original slab allocation. Depending on slab page order and the size of slab objects, objcg vector can take several pages. If the original allocation was done with the __GFP_NOFAIL flag, it triggered a warning in the page allocation code. Indeed, order > 1 pages should not been allocated with the __GFP_NOFAIL flag. Fix this by simply dropping the __GFP_NOFAIL flag when allocating the objcg vector. It effectively allows to skip the accounting of a single slab object under a heavy memory pressure. An alternative would be to implement the mechanism to fallback to order-0 allocations for accounting metadata, which is also not perfect because it will increase performance penalty and memory footprint of the kernel memory accounting under memory pressure. 
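Concretely, __GFP_NOFAIL is just added to the mask of GFP bits stripped before the objcg vector allocation, as in this sketch of the change below:

	/* Bits that must not leak from the original slab allocation into the
	 * objcg vector allocation.  Dropping __GFP_NOFAIL avoids the page
	 * allocator warning for order > 1 no-fail requests; under pressure
	 * the worst case is that a single slab object goes unaccounted.
	 */
	#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
					 __GFP_ACCOUNT | __GFP_NOFAIL)
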
Link: https://lkml.kernel.org/r/ZUp8ZFGxwmCx4ZFr@P9FQF9L96D.corp.robot.car Signed-off-by: Roman Gushchin Reported-by: Christoph Lameter Closes: https://lkml.kernel.org/r/6b42243e-f197-600a-5d22-56bd728a5ad8@gentwo.org Acked-by: Shakeel Butt Cc: Matthew Wilcox Cc: Signed-off-by: Andrew Morton --- mm/memcontrol.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 774bd6e21e27..1c1061df9cd1 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2936,7 +2936,8 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg) * Moreover, it should not come from DMA buffer and is not readily * reclaimable. So those GFP bits should be masked off. */ -#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT) +#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \ + __GFP_ACCOUNT | __GFP_NOFAIL) /* * mod_objcg_mlstate() may be called with irq enabled, so From 13b2a4b22e98ff80b888a160a2acd92d81b05925 Mon Sep 17 00:00:00 2001 From: Hyeongtak Ji Date: Fri, 10 Nov 2023 14:37:09 +0900 Subject: [PATCH 179/299] mm/damon/core.c: avoid unintentional filtering out of schemes The function '__damos_filter_out()' causes DAMON to always filter out schemes whose filter type is anon or memcg if its matching value is set to false. This commit addresses the issue by ensuring that '__damos_filter_out()' no longer applies to filters whose type is 'anon' or 'memcg'. Link: https://lkml.kernel.org/r/1699594629-3816-1-git-send-email-hyeongtak.ji@gmail.com Fixes: ab9bda001b681 ("mm/damon/core: introduce address range type damos filter") Signed-off-by: Hyeongtak Ji Reviewed-by: SeongJae Park Cc: Signed-off-by: Andrew Morton --- mm/damon/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/damon/core.c b/mm/damon/core.c index 630077d95dc6..6262d55904e7 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -924,7 +924,7 @@ static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, matched = true; break; default: - break; + return false; } return matched == filter->matching; From 5f74f820f6fc844b95f9e5e406e0a07d97510420 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Mon, 13 Nov 2023 11:12:57 +0100 Subject: [PATCH 180/299] parisc: fix mmap_base calculation when stack grows upwards Matoro reported various userspace crashes on the parisc platform with kernel 6.6 and bisected it to commit 3033cd430768 ("parisc: Use generic mmap top-down layout and brk randomization"). That commit switched parisc to use the common infrastructure to calculate mmap_base, but missed that the mmap_base() function takes care for architectures where the stack grows downwards only. Fix the mmap_base() calculation to include the stack-grows-upwards case and thus fix the userspace crashes on parisc. 
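For the stack-grows-upwards case the calculation reduces to a single expression, as added to mm/util.c below:

	#ifdef CONFIG_STACK_GROWSUP
		/* Memory for the maximum stack size is reserved at the top of
		 * the task; mmap_base sits directly below that region and the
		 * mmap area grows downwards from it.
		 */
		return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
	#endif
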
Link: https://lkml.kernel.org/r/ZVH2qeS1bG7/1J/l@p100 Fixes: 3033cd430768 ("parisc: Use generic mmap top-down layout and brk randomization") Signed-off-by: Helge Deller Reported-by: matoro Tested-by: matoro Cc: [6.6+] Signed-off-by: Andrew Morton --- arch/parisc/Kconfig | 6 +++--- arch/parisc/include/asm/elf.h | 10 +--------- arch/parisc/include/asm/processor.h | 2 ++ arch/parisc/kernel/sys_parisc.c | 2 +- mm/util.c | 10 ++++++++++ 5 files changed, 17 insertions(+), 13 deletions(-) diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index fd69dfa0cdab..a7c9c0e69e5a 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -140,11 +140,11 @@ config ARCH_MMAP_RND_COMPAT_BITS_MIN default 8 config ARCH_MMAP_RND_BITS_MAX - default 24 if 64BIT - default 17 + default 18 if 64BIT + default 13 config ARCH_MMAP_RND_COMPAT_BITS_MAX - default 17 + default 13 # unless you want to implement ACPI on PA-RISC ... ;-) config PM diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h index 140eaa97bf21..2d73d3c3cd37 100644 --- a/arch/parisc/include/asm/elf.h +++ b/arch/parisc/include/asm/elf.h @@ -349,15 +349,7 @@ struct pt_regs; /* forward declaration... */ #define ELF_HWCAP 0 -/* Masks for stack and mmap randomization */ -#define BRK_RND_MASK (is_32bit_task() ? 0x07ffUL : 0x3ffffUL) -#define MMAP_RND_MASK (is_32bit_task() ? 0x1fffUL : 0x3ffffUL) -#define STACK_RND_MASK MMAP_RND_MASK - -struct mm_struct; -extern unsigned long arch_randomize_brk(struct mm_struct *); -#define arch_randomize_brk arch_randomize_brk - +#define STACK_RND_MASK 0x7ff /* 8MB of VA */ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 struct linux_binprm; diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index c05d121cf5d0..982aca20f56f 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -47,6 +47,8 @@ #ifndef __ASSEMBLY__ +struct rlimit; +unsigned long mmap_upper_limit(struct rlimit *rlim_stack); unsigned long calc_max_stack_size(unsigned long stack_max); /* diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index ab896eff7a1d..98af719d5f85 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -77,7 +77,7 @@ unsigned long calc_max_stack_size(unsigned long stack_max) * indicating that "current" should be used instead of a passed-in * value from the exec bprm as done with arch_pick_mmap_layout(). */ -static unsigned long mmap_upper_limit(struct rlimit *rlim_stack) +unsigned long mmap_upper_limit(struct rlimit *rlim_stack) { unsigned long stack_base; diff --git a/mm/util.c b/mm/util.c index aa01f6ea5a75..744b4d7e3fae 100644 --- a/mm/util.c +++ b/mm/util.c @@ -414,6 +414,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack) static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack) { +#ifdef CONFIG_STACK_GROWSUP + /* + * For an upwards growing stack the calculation is much simpler. + * Memory for the maximum stack size is reserved at the top of the + * task. mmap_base starts directly below the stack and grows + * downwards. 
+ */ + return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd); +#else unsigned long gap = rlim_stack->rlim_cur; unsigned long pad = stack_guard_gap; @@ -431,6 +440,7 @@ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack) gap = MAX_GAP; return PAGE_ALIGN(STACK_TOP - gap - rnd); +#endif } void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) From afccb0804fc74ac2f6737af6a139632606cb461d Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Tue, 14 Nov 2023 15:49:45 +0000 Subject: [PATCH 181/299] mm: more ptep_get() conversion Commit c33c794828f2 ("mm: ptep_get() conversion") converted all (non-arch) call sites to use ptep_get() instead of doing a direct dereference of the pte. Full rationale can be found in that commit's log. Since then, three new call sites have snuck in, which directly dereference the pte, so let's fix those up. Unfortunately there is no reliable automated mechanism to catch these; I'm relying on a combination of Coccinelle (which throws up a lot of false positives) and some compiler magic to force a compiler error on dereference (While this approach finds dereferences, it also yields a non-booting kernel so can't be committed). Link: https://lkml.kernel.org/r/20231114154945.490401-1-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- mm/filemap.c | 2 +- mm/ksm.c | 2 +- mm/userfaultfd.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index 9710f43a89ac..32eedf3afd45 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3443,7 +3443,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, * handled in the specific fault path, and it'll prohibit the * fault-around logic. */ - if (!pte_none(vmf->pte[count])) + if (!pte_none(ptep_get(&vmf->pte[count]))) goto skip; count++; diff --git a/mm/ksm.c b/mm/ksm.c index 7efcc68ccc6e..6a831009b4cb 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -468,7 +468,7 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex page = pfn_swap_entry_to_page(entry); } /* return 1 if the page is an normal ksm page or KSM-placed zero page */ - ret = (page && PageKsm(page)) || is_ksm_zero_pte(*pte); + ret = (page && PageKsm(page)) || is_ksm_zero_pte(ptent); pte_unmap_unlock(pte, ptl); return ret; } diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 96d9eae5c7cc..0b6ca553bebe 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -312,7 +312,7 @@ static int mfill_atomic_pte_poison(pmd_t *dst_pmd, ret = -EEXIST; /* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP). */ - if (!pte_none(*dst_pte)) + if (!pte_none(ptep_get(dst_pte))) goto out_unlock; set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); From 1c4a7587d1bbee0fd53b63af60e4244a62775f57 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 1 Nov 2023 02:46:27 +0900 Subject: [PATCH 182/299] modpost: fix section mismatch message for RELA The section mismatch check prints a bogus symbol name on some architectures. [test code] #include int __initdata foo; int get_foo(void) { return foo; } If you compile it with GCC for riscv or loongarch, modpost will show an incorrect symbol name: WARNING: modpost: vmlinux: section mismatch in reference: get_foo+0x8 (section: .text) -> done (section: .init.data) To get the correct symbol address, the st_value must be added. 
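In code terms, the target address of a RELA relocation is computed as:

	Elf_Sym *tsym = elf->symtab_start + r_sym;
	/* tsym->st_value is the value of the symbol the relocation refers
	 * to; on riscv and loongarch it is often non-zero, so the addend
	 * alone does not identify the referenced symbol.
	 */
	Elf_Addr taddr = tsym->st_value + TO_NATIVE(rela->r_addend);
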
This issue has never been noticed since commit 93684d3b8062 ("kbuild: include symbol names in section mismatch warnings") presumably because st_value becomes zero on most architectures when the referenced symbol is looked up. It is not true for riscv or loongarch, at least. With this fix, modpost will show the correct symbol name: WARNING: modpost: vmlinux: section mismatch in reference: get_foo+0x8 (section: .text) -> foo (section: .init.data) Signed-off-by: Masahiro Yamada Reviewed-by: Nick Desaulniers --- scripts/mod/modpost.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 973b5e5ae2dd..cb6406f485a9 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -1383,13 +1383,15 @@ static void section_rela(struct module *mod, struct elf_info *elf, const Elf_Rela *rela; for (rela = start; rela < stop; rela++) { + Elf_Sym *tsym; Elf_Addr taddr, r_offset; unsigned int r_type, r_sym; r_offset = TO_NATIVE(rela->r_offset); get_rel_type_and_sym(elf, rela->r_info, &r_type, &r_sym); - taddr = TO_NATIVE(rela->r_addend); + tsym = elf->symtab_start + r_sym; + taddr = tsym->st_value + TO_NATIVE(rela->r_addend); switch (elf->hdr->e_machine) { case EM_RISCV: @@ -1404,7 +1406,7 @@ static void section_rela(struct module *mod, struct elf_info *elf, break; } - check_section_mismatch(mod, elf, elf->symtab_start + r_sym, + check_section_mismatch(mod, elf, tsym, fsecndx, fromsec, r_offset, taddr); } } From ba276ce5865b5a22ee96c4c5664bfefd9c1bb593 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Tue, 14 Nov 2023 19:11:04 -0500 Subject: [PATCH 183/299] bcachefs: Fix missing locking for dentry->d_parent access Reported-by: Al Viro Signed-off-by: Kent Overstreet --- fs/bcachefs/xattr.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c index a39ff0c296ec..79d982674c18 100644 --- a/fs/bcachefs/xattr.c +++ b/fs/bcachefs/xattr.c @@ -552,6 +552,14 @@ static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler, s.v = v + 1; s.defined = true; } else { + /* + * Check if this option was set on the parent - if so, switched + * back to inheriting from the parent: + * + * rename() also has to deal with keeping inherited options up + * to date - see bch2_reinherit_attrs() + */ + spin_lock(&dentry->d_lock); if (!IS_ROOT(dentry)) { struct bch_inode_info *dir = to_bch_ei(d_inode(dentry->d_parent)); @@ -560,6 +568,7 @@ static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler, } else { s.v = 0; } + spin_unlock(&dentry->d_lock); s.defined = false; } From ccab434e674ca95d483788b1895a70c21b7f016a Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Wed, 15 Nov 2023 11:08:57 +0100 Subject: [PATCH 184/299] usb: aqc111: check packet for fixup for true limit If a device sends a packet that is inbetween 0 and sizeof(u64) the value passed to skb_trim() as length will wrap around ending up as some very large value. The driver will then proceed to parse the header located at that position, which will either oops or process some random value. The fix is to check against sizeof(u64) rather than 0, which the driver currently does. The issue exists since the introduction of the driver. Signed-off-by: Oliver Neukum Signed-off-by: David S. 
Miller --- drivers/net/usb/aqc111.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index a017e9de2119..7b8afa589a53 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -1079,17 +1079,17 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb) u16 pkt_count = 0; u64 desc_hdr = 0; u16 vlan_tag = 0; - u32 skb_len = 0; + u32 skb_len; if (!skb) goto err; - if (skb->len == 0) + skb_len = skb->len; + if (skb_len < sizeof(desc_hdr)) goto err; - skb_len = skb->len; /* RX Descriptor Header */ - skb_trim(skb, skb->len - sizeof(desc_hdr)); + skb_trim(skb, skb_len - sizeof(desc_hdr)); desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb)); /* Check these packets */ From 7fbd5fc2b35a8f559a6b380dfa9bcd964a758186 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Wed, 15 Nov 2023 11:53:31 +0100 Subject: [PATCH 185/299] stmmac: dwmac-loongson: Add architecture dependency Only present the DWMAC_LOONGSON option on architectures where it can actually be used. This follows the same logic as the DWMAC_INTEL option. Signed-off-by: Jean Delvare Cc: Keguang Zhang Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index a2b9e289aa36..85dcda51df05 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -280,7 +280,7 @@ config DWMAC_INTEL config DWMAC_LOONGSON tristate "Loongson PCI DWMAC support" default MACH_LOONGSON64 - depends on STMMAC_ETH && PCI + depends on (MACH_LOONGSON64 || COMPILE_TEST) && STMMAC_ETH && PCI depends on COMMON_CLK help This selects the LOONGSON PCI bus support for the stmmac driver, From 0c3bd086d12d185650d095a906662593ec607bd0 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 15 Nov 2023 17:15:40 +0000 Subject: [PATCH 186/299] rxrpc: Fix some minor issues with bundle tracing Fix some superficial issues with the tracing of rxrpc_bundle structs, including: (1) Set the debug_id when the bundle is allocated rather than when it is set up so that the "NEW" trace line displays the correct bundle ID. (2) Show the refcount when emitting the "FREE" traceline. Signed-off-by: David Howells cc: Marc Dionne cc: "David S. Miller" cc: Eric Dumazet cc: Jakub Kicinski cc: Paolo Abeni cc: linux-afs@lists.infradead.org cc: netdev@vger.kernel.org Signed-off-by: David S. 
Miller --- net/rxrpc/conn_client.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 981ca5b98bcb..1d95f8bc769f 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -73,6 +73,7 @@ static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local) static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call, gfp_t gfp) { + static atomic_t rxrpc_bundle_id; struct rxrpc_bundle *bundle; bundle = kzalloc(sizeof(*bundle), gfp); @@ -85,6 +86,7 @@ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call, bundle->upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags); bundle->service_id = call->dest_srx.srx_service; bundle->security_level = call->security_level; + bundle->debug_id = atomic_inc_return(&rxrpc_bundle_id); refcount_set(&bundle->ref, 1); atomic_set(&bundle->active, 1); INIT_LIST_HEAD(&bundle->waiting_calls); @@ -105,7 +107,8 @@ struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle, static void rxrpc_free_bundle(struct rxrpc_bundle *bundle) { - trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_free); + trace_rxrpc_bundle(bundle->debug_id, refcount_read(&bundle->ref), + rxrpc_bundle_free); rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle); key_put(bundle->key); kfree(bundle); @@ -239,7 +242,6 @@ dont_reuse: */ int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp) { - static atomic_t rxrpc_bundle_id; struct rxrpc_bundle *bundle, *candidate; struct rxrpc_local *local = call->local; struct rb_node *p, **pp, *parent; @@ -306,7 +308,6 @@ int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp) } _debug("new bundle"); - candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id); rb_link_node(&candidate->local_node, parent, pp); rb_insert_color(&candidate->local_node, &local->client_bundles); call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call); From d565fa4300d9ebd5ba3bbd259ce841f8dab609d6 Mon Sep 17 00:00:00 2001 From: Gerd Bayer Date: Wed, 15 Nov 2023 16:59:58 +0100 Subject: [PATCH 187/299] s390/ism: ism driver implies smc protocol Since commit a72178cfe855 ("net/smc: Fix dependency of SMC on ISM") you can build the ism code without selecting the SMC network protocol. That leaves some ism functions be reported as unused. Move these functions under the conditional compile with CONFIG_SMC. Also codify the suggestion to also configure the SMC protocol in ism's Kconfig - but with an "imply" rather than a "select" as SMC depends on other config options and allow for a deliberate decision not to build SMC. Also, mention that in ISM's help. Fixes: a72178cfe855 ("net/smc: Fix dependency of SMC on ISM") Reported-by: Randy Dunlap Closes: https://lore.kernel.org/netdev/afd142a2-1fa0-46b9-8b2d-7652d41d3ab8@infradead.org/ Signed-off-by: Gerd Bayer Reviewed-by: Wenjia Zhang Reviewed-by: Simon Horman Acked-by: Randy Dunlap Tested-by: Randy Dunlap # build-tested Signed-off-by: David S. Miller --- drivers/s390/net/Kconfig | 3 +- drivers/s390/net/ism_drv.c | 93 +++++++++++++++++++------------------- 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 4902d45e929c..c61e6427384c 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -103,10 +103,11 @@ config CCWGROUP config ISM tristate "Support for ISM vPCI Adapter" depends on PCI + imply SMC default n help Select this option if you want to use the Internal Shared Memory - vPCI Adapter. + vPCI Adapter. 
The adapter can be used with the SMC network protocol. To compile as a module choose M. The module name is ism. If unsure, choose N. diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index 6df7f377d2f9..81aabbfbbe2c 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -30,7 +30,6 @@ static const struct pci_device_id ism_device_table[] = { MODULE_DEVICE_TABLE(pci, ism_device_table); static debug_info_t *ism_debug_info; -static const struct smcd_ops ism_ops; #define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */ static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */ @@ -289,22 +288,6 @@ out: return ret; } -static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid, - u32 vid) -{ - union ism_query_rgid cmd; - - memset(&cmd, 0, sizeof(cmd)); - cmd.request.hdr.cmd = ISM_QUERY_RGID; - cmd.request.hdr.len = sizeof(cmd.request); - - cmd.request.rgid = rgid; - cmd.request.vlan_valid = vid_valid; - cmd.request.vlan_id = vid; - - return ism_cmd(ism, &cmd); -} - static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb) { clear_bit(dmb->sba_idx, ism->sba_bitmap); @@ -429,23 +412,6 @@ static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id) return ism_cmd(ism, &cmd); } -static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq, - u32 event_code, u64 info) -{ - union ism_sig_ieq cmd; - - memset(&cmd, 0, sizeof(cmd)); - cmd.request.hdr.cmd = ISM_SIGNAL_IEQ; - cmd.request.hdr.len = sizeof(cmd.request); - - cmd.request.rgid = rgid; - cmd.request.trigger_irq = trigger_irq; - cmd.request.event_code = event_code; - cmd.request.info = info; - - return ism_cmd(ism, &cmd); -} - static unsigned int max_bytes(unsigned int start, unsigned int len, unsigned int boundary) { @@ -503,14 +469,6 @@ u8 *ism_get_seid(void) } EXPORT_SYMBOL_GPL(ism_get_seid); -static u16 ism_get_chid(struct ism_dev *ism) -{ - if (!ism || !ism->pdev) - return 0; - - return to_zpci(ism->pdev)->pchid; -} - static void ism_handle_event(struct ism_dev *ism) { struct ism_event *entry; @@ -569,11 +527,6 @@ static irqreturn_t ism_handle_irq(int irq, void *data) return IRQ_HANDLED; } -static u64 ism_get_local_gid(struct ism_dev *ism) -{ - return ism->local_gid; -} - static int ism_dev_init(struct ism_dev *ism) { struct pci_dev *pdev = ism->pdev; @@ -774,6 +727,22 @@ module_exit(ism_exit); /*************************** SMC-D Implementation *****************************/ #if IS_ENABLED(CONFIG_SMC) +static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid, + u32 vid) +{ + union ism_query_rgid cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_QUERY_RGID; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.rgid = rgid; + cmd.request.vlan_valid = vid_valid; + cmd.request.vlan_id = vid; + + return ism_cmd(ism, &cmd); +} + static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid, u32 vid) { @@ -811,6 +780,23 @@ static int smcd_reset_vlan_required(struct smcd_dev *smcd) return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN); } +static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq, + u32 event_code, u64 info) +{ + union ism_sig_ieq cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_SIGNAL_IEQ; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.rgid = rgid; + cmd.request.trigger_irq = trigger_irq; + cmd.request.event_code = event_code; + cmd.request.info = info; + + return ism_cmd(ism, &cmd); +} + static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 
trigger_irq, u32 event_code, u64 info) { @@ -830,11 +816,24 @@ static int smcd_supports_v2(void) SYSTEM_EID.type[0] != '0'; } +static u64 ism_get_local_gid(struct ism_dev *ism) +{ + return ism->local_gid; +} + static u64 smcd_get_local_gid(struct smcd_dev *smcd) { return ism_get_local_gid(smcd->priv); } +static u16 ism_get_chid(struct ism_dev *ism) +{ + if (!ism || !ism->pdev) + return 0; + + return to_zpci(ism->pdev)->pchid; +} + static u16 smcd_get_chid(struct smcd_dev *smcd) { return ism_get_chid(smcd->priv); From 75a50c4f5b957a34dedb7c8cce52b582a9162828 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 16 Nov 2023 18:01:41 +0100 Subject: [PATCH 188/299] kselftest: rtnetlink: fix ip route command typo The blamed commit below introduced a typo causing 'gretap' test-case failures: ./rtnetlink.sh -t kci_test_gretap -v COMMAND: ip link add name test-dummy0 type dummy COMMAND: ip link set test-dummy0 up COMMAND: ip netns add testns COMMAND: ip link help gretap 2>&1 | grep -q '^Usage:' COMMAND: ip -netns testns link add dev gretap00 type gretap seq key 102 local 172.16.1.100 remote 172.16.1.200 COMMAND: ip -netns testns addr add dev gretap00 10.1.1.100/24 COMMAND: ip -netns testns link set dev gretap00 ups Error: either "dev" is duplicate, or "ups" is a garbage. COMMAND: ip -netns testns link del gretap00 COMMAND: ip -netns testns link add dev gretap00 type gretap external COMMAND: ip -netns testns link del gretap00 FAIL: gretap Fix it by using the correct keyword. Fixes: 9c2a19f71515 ("kselftest: rtnetlink.sh: add verbose flag") Signed-off-by: Paolo Abeni Reviewed-by: Hangbin Liu Signed-off-by: David S. Miller --- tools/testing/selftests/net/rtnetlink.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 5f2b3f6c0d74..38be9706c45f 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -859,7 +859,7 @@ kci_test_gretap() run_cmd ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24 - run_cmd ip -netns "$testns" link set dev $DEV_NS ups + run_cmd ip -netns "$testns" link set dev $DEV_NS up run_cmd ip -netns "$testns" link del "$DEV_NS" # test external mode From 3798680f2fbbe0ca3ab6138b34e0d161c36497ee Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 16 Nov 2023 13:12:58 +0000 Subject: [PATCH 189/299] rxrpc: Fix RTT determination to use any ACK as a source Fix RTT determination to be able to use any type of ACK as the response from which RTT can be calculated provided its ack.serial is non-zero and matches the serial number of an outgoing DATA or ACK packet. This shouldn't be limited to REQUESTED-type ACKs as these can have other types substituted for them for things like duplicate or out-of-order packets. Fixes: 4700c4d80b7b ("rxrpc: Fix loss of RTT samples due to interposed ACK") Signed-off-by: David Howells cc: Marc Dionne cc: "David S. Miller" cc: Eric Dumazet cc: Jakub Kicinski cc: Paolo Abeni cc: linux-afs@lists.infradead.org cc: netdev@vger.kernel.org Signed-off-by: David S. 
Miller --- include/trace/events/rxrpc.h | 2 +- net/rxrpc/input.c | 35 ++++++++++++++++------------------- 2 files changed, 17 insertions(+), 20 deletions(-) diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 4c53a5ef6257..f7e537f64db4 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -328,7 +328,7 @@ E_(rxrpc_rtt_tx_ping, "PING") #define rxrpc_rtt_rx_traces \ - EM(rxrpc_rtt_rx_cancel, "CNCL") \ + EM(rxrpc_rtt_rx_other_ack, "OACK") \ EM(rxrpc_rtt_rx_obsolete, "OBSL") \ EM(rxrpc_rtt_rx_lost, "LOST") \ EM(rxrpc_rtt_rx_ping_response, "PONG") \ diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 030d64f282f3..3f9594d12519 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -643,12 +643,8 @@ static void rxrpc_complete_rtt_probe(struct rxrpc_call *call, clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail); smp_mb(); /* Read data before setting avail bit */ set_bit(i, &call->rtt_avail); - if (type != rxrpc_rtt_rx_cancel) - rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial, - sent_at, resp_time); - else - trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i, - orig_serial, acked_serial, 0, 0); + rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial, + sent_at, resp_time); matched = true; } @@ -801,20 +797,21 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) summary.ack_reason, nr_acks); rxrpc_inc_stat(call->rxnet, stat_rx_acks[ack.reason]); - switch (ack.reason) { - case RXRPC_ACK_PING_RESPONSE: - rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, - rxrpc_rtt_rx_ping_response); - break; - case RXRPC_ACK_REQUESTED: - rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, - rxrpc_rtt_rx_requested_ack); - break; - default: - if (acked_serial != 0) + if (acked_serial != 0) { + switch (ack.reason) { + case RXRPC_ACK_PING_RESPONSE: rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, - rxrpc_rtt_rx_cancel); - break; + rxrpc_rtt_rx_ping_response); + break; + case RXRPC_ACK_REQUESTED: + rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, + rxrpc_rtt_rx_requested_ack); + break; + default: + rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, + rxrpc_rtt_rx_other_ack); + break; + } } if (ack.reason == RXRPC_ACK_PING) { From 1a01319feef7047aa2ba400ffa3e047776aa29ca Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 16 Nov 2023 13:12:59 +0000 Subject: [PATCH 190/299] rxrpc: Defer the response to a PING ACK until we've parsed it Defer the generation of a PING RESPONSE ACK in response to a PING ACK until we've parsed the PING ACK so that we pick up any changes to the packet queue so that we can update ackinfo. This is also applied to an ACK generated in response to an ACK with the REQUEST_ACK flag set. Note that whilst the problem was added in commit 248f219cb8bc, it didn't really matter at that point because the ACK was proposed in softirq mode and generated asynchronously later in process context, taking the latest values at the time. But this fix is only needed since the move to parse incoming packets in an I/O thread rather than in softirq and generate the ACK at point of proposal (b0346843b1076b34a0278ff601f8f287535cb064). Fixes: 248f219cb8bc ("rxrpc: Rewrite the data and ack handling code") Signed-off-by: David Howells cc: Marc Dionne cc: "David S. Miller" cc: Eric Dumazet cc: Jakub Kicinski cc: Paolo Abeni cc: linux-afs@lists.infradead.org cc: netdev@vger.kernel.org Signed-off-by: David S. 
Miller --- net/rxrpc/input.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 3f9594d12519..92495e73b869 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -814,14 +814,6 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) } } - if (ack.reason == RXRPC_ACK_PING) { - rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial, - rxrpc_propose_ack_respond_to_ping); - } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) { - rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, ack_serial, - rxrpc_propose_ack_respond_to_ack); - } - /* If we get an EXCEEDS_WINDOW ACK from the server, it probably * indicates that the client address changed due to NAT. The server * lost the call because it switched to a different peer. @@ -832,7 +824,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) rxrpc_is_client_call(call)) { rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, 0, -ENETRESET); - return; + goto send_response; } /* If we get an OUT_OF_SEQUENCE ACK from the server, that can also @@ -846,7 +838,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) rxrpc_is_client_call(call)) { rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, 0, -ENETRESET); - return; + goto send_response; } /* Discard any out-of-order or duplicate ACKs (outside lock). */ @@ -854,7 +846,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial, first_soft_ack, call->acks_first_seq, prev_pkt, call->acks_prev_seq); - return; + goto send_response; } info.rxMTU = 0; @@ -894,7 +886,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) case RXRPC_CALL_SERVER_AWAIT_ACK: break; default: - return; + goto send_response; } if (before(hard_ack, call->acks_hard_ack) || @@ -906,7 +898,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) if (after(hard_ack, call->acks_hard_ack)) { if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) { rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack); - return; + goto send_response; } } @@ -924,6 +916,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) rxrpc_propose_ack_ping_for_lost_reply); rxrpc_congestion_management(call, skb, &summary, acked_serial); + +send_response: + if (ack.reason == RXRPC_ACK_PING) + rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial, + rxrpc_propose_ack_respond_to_ping); + else if (sp->hdr.flags & RXRPC_REQUEST_ACK) + rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, ack_serial, + rxrpc_propose_ack_respond_to_ack); } /* From 76df934c6d5f5c93ba7a0112b1818620ddc10b19 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 16 Nov 2023 12:11:51 -0800 Subject: [PATCH 191/299] MAINTAINERS: Add netdev subsystem profile link The netdev subsystem has had a subsystem process document for a while now. Link it appropriately in MAINTAINERS with the P: tag. Cc: Jakub Kicinski Cc: "David S. Miller" Cc: Eric Dumazet Cc: Paolo Abeni Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. 
Miller --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 482d428472e7..80a95878c61d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14994,6 +14994,7 @@ M: Jakub Kicinski M: Paolo Abeni L: netdev@vger.kernel.org S: Maintained +P: Documentation/process/maintainer-netdev.rst Q: https://patchwork.kernel.org/project/netdevbpf/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git @@ -15045,6 +15046,7 @@ M: Jakub Kicinski M: Paolo Abeni L: netdev@vger.kernel.org S: Maintained +P: Documentation/process/maintainer-netdev.rst Q: https://patchwork.kernel.org/project/netdevbpf/list/ B: mailto:netdev@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git From 9e0be3f50c0e8517d0238b62409c20bcb8cd8785 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Fri, 10 Nov 2023 13:07:22 +0100 Subject: [PATCH 192/299] linux/export: clean up the IA-64 KSYM_FUNC macro With commit cf8e8658100d ("arch: Remove Itanium (IA-64) architecture"), there is no need to keep the IA-64 definition of the KSYM_FUNC macro. Clean up the IA-64 definition of the KSYM_FUNC macro. Signed-off-by: Lukas Bulwahn Reviewed-by: Nathan Chancellor Signed-off-by: Masahiro Yamada --- include/linux/export-internal.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h index 45fca09b2319..69501e0ec239 100644 --- a/include/linux/export-internal.h +++ b/include/linux/export-internal.h @@ -50,9 +50,7 @@ " .previous" "\n" \ ) -#ifdef CONFIG_IA64 -#define KSYM_FUNC(name) @fptr(name) -#elif defined(CONFIG_PARISC) && defined(CONFIG_64BIT) +#if defined(CONFIG_PARISC) && defined(CONFIG_64BIT) #define KSYM_FUNC(name) P%name #else #define KSYM_FUNC(name) name From 76020731d4ee897411ce4a73916ed805ea15d946 Mon Sep 17 00:00:00 2001 From: Simon Glass Date: Fri, 10 Nov 2023 17:28:01 -0700 Subject: [PATCH 193/299] kbuild: Move the single quotes for image name Add quotes where UIMAGE_NAME is used, rather than where it is defined. This allows the UIMAGE_NAME variable to be set by the user. Signed-off-by: Simon Glass Signed-off-by: Masahiro Yamada --- scripts/Makefile.lib | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 68d0134bdbf9..1a965fe68e01 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -487,14 +487,14 @@ UIMAGE_OPTS-y ?= UIMAGE_TYPE ?= kernel UIMAGE_LOADADDR ?= arch_must_set_this UIMAGE_ENTRYADDR ?= $(UIMAGE_LOADADDR) -UIMAGE_NAME ?= 'Linux-$(KERNELRELEASE)' +UIMAGE_NAME ?= Linux-$(KERNELRELEASE) quiet_cmd_uimage = UIMAGE $@ cmd_uimage = $(BASH) $(MKIMAGE) -A $(UIMAGE_ARCH) -O linux \ -C $(UIMAGE_COMPRESSION) $(UIMAGE_OPTS-y) \ -T $(UIMAGE_TYPE) \ -a $(UIMAGE_LOADADDR) -e $(UIMAGE_ENTRYADDR) \ - -n $(UIMAGE_NAME) -d $< $@ + -n '$(UIMAGE_NAME)' -d $< $@ # XZ # --------------------------------------------------------------------------- From ae1eff0349f2e908fc083630e8441ea6dc434dc0 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 15 Nov 2023 13:16:53 +0900 Subject: [PATCH 194/299] kconfig: fix memory leak from range properties Currently, sym_validate_range() duplicates the range string using xstrdup(), which is overwritten by a subsequent sym_calc_value() call. It results in a memory leak. Instead, only the pointer should be copied. Below is a test case, with a summary from Valgrind. 
[Test Kconfig] config FOO int "foo" range 10 20 [Test .config] CONFIG_FOO=0 [Before] LEAK SUMMARY: definitely lost: 3 bytes in 1 blocks indirectly lost: 0 bytes in 0 blocks possibly lost: 0 bytes in 0 blocks still reachable: 17,465 bytes in 21 blocks suppressed: 0 bytes in 0 blocks [After] LEAK SUMMARY: definitely lost: 0 bytes in 0 blocks indirectly lost: 0 bytes in 0 blocks possibly lost: 0 bytes in 0 blocks still reachable: 17,462 bytes in 20 blocks suppressed: 0 bytes in 0 blocks Signed-off-by: Masahiro Yamada --- scripts/kconfig/symbol.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c index 0572330bf8a7..a76925b46ce6 100644 --- a/scripts/kconfig/symbol.c +++ b/scripts/kconfig/symbol.c @@ -122,9 +122,9 @@ static long long sym_get_range_val(struct symbol *sym, int base) static void sym_validate_range(struct symbol *sym) { struct property *prop; + struct symbol *range_sym; int base; long long val, val2; - char str[64]; switch (sym->type) { case S_INT: @@ -140,17 +140,15 @@ static void sym_validate_range(struct symbol *sym) if (!prop) return; val = strtoll(sym->curr.val, NULL, base); - val2 = sym_get_range_val(prop->expr->left.sym, base); + range_sym = prop->expr->left.sym; + val2 = sym_get_range_val(range_sym, base); if (val >= val2) { - val2 = sym_get_range_val(prop->expr->right.sym, base); + range_sym = prop->expr->right.sym; + val2 = sym_get_range_val(range_sym, base); if (val <= val2) return; } - if (sym->type == S_INT) - sprintf(str, "%lld", val2); - else - sprintf(str, "0x%llx", val2); - sym->curr.val = xstrdup(str); + sym->curr.val = range_sym->curr.val; } static void sym_set_changed(struct symbol *sym) From 1ffa8602e39b89469dc703ebab7a7e44c33da0f7 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Wed, 13 Sep 2023 16:18:44 -0400 Subject: [PATCH 195/299] drm/amd/display: Guard against invalid RPTR/WPTR being set [WHY] HW can return invalid values on register read, guard against these being set and causing us to access memory out of range and page fault. [HOW] Guard at sync_inbox1 and guard at pushing commands. 
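The guard itself is a plain bounds check against the ring capacity before the hardware-provided pointers are adopted (sketch; see the hunks below):

	uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
	uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);

	/* A failed register read can return garbage; never adopt pointers
	 * that lie outside the ring buffer, otherwise later indexing walks
	 * past the end of the inbox and page faults.
	 */
	if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity)
		return DMUB_STATUS_HW_FAILURE;
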
Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Hansen Dsouza Acked-by: Alex Hung Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dmub/src/dmub_srv.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index e43e8d4bfe37..5d36f3e5dc2b 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -707,9 +707,16 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub) return DMUB_STATUS_INVALID; if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) { - dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); - dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub); - dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt; + uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub); + + if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) { + return DMUB_STATUS_HW_FAILURE; + } else { + dmub->inbox1_rb.rptr = rptr; + dmub->inbox1_rb.wrpt = wptr; + dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt; + } } return DMUB_STATUS_OK; @@ -743,6 +750,11 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, if (!dmub->hw_init) return DMUB_STATUS_INVALID; + if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity || + dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) { + return DMUB_STATUS_HW_FAILURE; + } + if (dmub_rb_push_front(&dmub->inbox1_rb, cmd)) return DMUB_STATUS_OK; From 0288603040c38ccfeb5342f34a52673366d90038 Mon Sep 17 00:00:00 2001 From: Victor Lu Date: Wed, 4 Oct 2023 14:24:15 -0400 Subject: [PATCH 196/299] drm/amdgpu: Do not program VF copy regs in mmhub v1.8 under SRIOV (v2) MC_VM_AGP_* registers should not be programmed by guest driver. v2: move early return outside of loop Signed-off-by: Victor Lu Reviewed-by: Samir Dhume Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index ea142611be1c..9b0146732e13 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -130,6 +130,9 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev) uint64_t value; int i; + if (amdgpu_sriov_vf(adev)) + return; + inst_mask = adev->aid_mask; for_each_inst(i, inst_mask) { /* Program the AGP BAR */ @@ -139,9 +142,6 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); - if (amdgpu_sriov_vf(adev)) - return; - /* Program the system aperture low logical page number. */ WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); From bdb72185d310fc8049c7ea95221d640e9e7165e5 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 13 Nov 2023 18:05:34 +0800 Subject: [PATCH 197/299] drm/amdgpu: finalizing mem_partitions at the end of GMC v9 sw_fini The valid num_mem_partitions is required during ttm pool fini, thus move the cleanup at the end of the function. 
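The ordering requirement is simply that the partition bookkeeping stays valid until after amdgpu_bo_fini(), where the TTM pool teardown that consults it happens (rough sketch of the tail of sw_fini):

	amdgpu_bo_fini(adev);	/* TTM pool fini still reads num_mem_partitions */

	/* Only now is it safe to drop the partition information. */
	adev->gmc.num_mem_partitions = 0;
	kfree(adev->gmc.mem_partitions);

	return 0;
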
Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index bde25eb4ed8e..c1f2f166f064 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -2170,8 +2170,6 @@ static int gmc_v9_0_sw_fini(void *handle) if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) amdgpu_gmc_sysfs_fini(adev); - adev->gmc.num_mem_partitions = 0; - kfree(adev->gmc.mem_partitions); amdgpu_gmc_ras_fini(adev); amdgpu_gem_force_release(adev); @@ -2185,6 +2183,9 @@ static int gmc_v9_0_sw_fini(void *handle) amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0); amdgpu_bo_fini(adev); + adev->gmc.num_mem_partitions = 0; + kfree(adev->gmc.mem_partitions); + return 0; } From 8a0173cd90984835645022bf1997abd1bcd81aae Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Sun, 12 Nov 2023 09:51:19 +0530 Subject: [PATCH 198/299] drm/amdgpu: Address member 'ring' not described in 'amdgpu_ vce, uvd_entity_init()' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes the following: drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c:237: warning: Function parameter or member 'ring' not described in 'amdgpu_vce_entity_init' drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c:405: warning: Function parameter or member 'ring' not described in 'amdgpu_uvd_entity_init' Cc: Christian König Cc: Alex Deucher Cc: "Pan, Xinhui" Signed-off-by: Srinivasan Shanmugam Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 65949cc7abb9..07d930339b07 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -398,6 +398,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) * amdgpu_uvd_entity_init - init entity * * @adev: amdgpu_device pointer + * @ring: amdgpu_ring pointer to check * * Initialize the entity used for handle management in the kernel driver. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 0954447f689d..59acf424a078 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -230,6 +230,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) * amdgpu_vce_entity_init - init entity * * @adev: amdgpu_device pointer + * @ring: amdgpu_ring pointer to check * * Initialize the entity used for handle management in the kernel driver. */ From a58555359a9f870543aaddef277c3396159895ce Mon Sep 17 00:00:00 2001 From: Fangzhi Zuo Date: Mon, 23 Oct 2023 13:57:32 -0400 Subject: [PATCH 199/299] drm/amd/display: Fix DSC not Enabled on Direct MST Sink [WHY & HOW] For the scenario when a dsc capable MST sink device is directly connected, it needs to use max dsc compression as the link bw constraint. 
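The condition for constraining the mode by the maximum DSC compression therefore becomes, roughly:

	/* DSC can be used end to end when common DSC params are possible AND
	 * either the branch supports DSC pass-through or the directly
	 * connected leaf decodes DSC itself (its dsc_aux is the port's own
	 * aux); otherwise fall back to the native bandwidth check.
	 */
	if (is_dsc_common_config_possible(stream, &bw_range) &&
	    (aconnector->mst_output_port->passthrough_aux ||
	     aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
		/* constrain the mode against the max compressed bandwidth */
	} else {
		/* constrain the mode against full_pbn (native bandwidth) */
	}
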
Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Roman Li Acked-by: Alex Hung Signed-off-by: Fangzhi Zuo Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../display/amdgpu_dm/amdgpu_dm_mst_types.c | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index d3b13d362eda..11da0eebee6c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -1604,31 +1604,31 @@ enum dc_status dm_dp_mst_is_port_support_mode( unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0; unsigned int max_compressed_bw_in_kbps = 0; struct dc_dsc_bw_range bw_range = {0}; - struct drm_dp_mst_topology_mgr *mst_mgr; + uint16_t full_pbn = aconnector->mst_output_port->full_pbn; /* - * check if the mode could be supported if DSC pass-through is supported - * AND check if there enough bandwidth available to support the mode - * with DSC enabled. + * Consider the case with the depth of the mst topology tree is equal or less than 2 + * A. When dsc bitstream can be transmitted along the entire path + * 1. dsc is possible between source and branch/leaf device (common dsc params is possible), AND + * 2. dsc passthrough supported at MST branch, or + * 3. dsc decoding supported at leaf MST device + * Use maximum dsc compression as bw constraint + * B. When dsc bitstream cannot be transmitted along the entire path + * Use native bw as bw constraint */ if (is_dsc_common_config_possible(stream, &bw_range) && - aconnector->mst_output_port->passthrough_aux) { - mst_mgr = aconnector->mst_output_port->mgr; - mutex_lock(&mst_mgr->lock); - + (aconnector->mst_output_port->passthrough_aux || + aconnector->dsc_aux == &aconnector->mst_output_port->aux)) { cur_link_settings = stream->link->verified_link_cap; upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, - &cur_link_settings - ); - down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn); + &cur_link_settings); + down_link_bw_in_kbps = kbps_from_pbn(full_pbn); /* pick the bottleneck */ end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, down_link_bw_in_kbps); - mutex_unlock(&mst_mgr->lock); - /* * use the maximum dsc compression bandwidth as the required * bandwidth for the mode @@ -1643,8 +1643,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( /* check if mode could be supported within full_pbn */ bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3; pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false); - - if (pbn > aconnector->mst_output_port->full_pbn) + if (pbn > full_pbn) return DC_FAIL_BANDWIDTH_VALIDATE; } From 50d51374b498457c4dea26779d32ccfed12ddaff Mon Sep 17 00:00:00 2001 From: YuanShang Date: Tue, 31 Oct 2023 10:32:37 +0800 Subject: [PATCH 200/299] drm/amdgpu: correct chunk_ptr to a pointer to chunk. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The variable "chunk_ptr" should be a pointer pointing to a struct drm_amdgpu_cs_chunk instead of to a pointer of that. 
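In other words, each element of the user chunk array is itself a user-space address of one chunk, so the local variable is a single-level pointer (illustrative sketch of the pass1 loop, error handling simplified):

	struct drm_amdgpu_cs_chunk __user *chunk_ptr;
	struct drm_amdgpu_cs_chunk user_chunk;

	chunk_ptr = u64_to_user_ptr(chunk_array[i]);	/* address of chunk i */
	if (copy_from_user(&user_chunk, chunk_ptr, sizeof(user_chunk)))
		return -EFAULT;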
Signed-off-by: YuanShang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index df3ecfa9e13f..e50be6500030 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -207,7 +207,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, } for (i = 0; i < p->nchunks; i++) { - struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL; + struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL; struct drm_amdgpu_cs_chunk user_chunk; uint32_t __user *cdata; From 786c355797b3942725829d02ce9e2e6a9eba11fe Mon Sep 17 00:00:00 2001 From: Asad Kamal Date: Tue, 31 Oct 2023 03:14:02 +0800 Subject: [PATCH 201/299] drm/amd/pm: Update metric table for smu v13_0_6 Update pmfw metric table to include pcie instantaneous bandwidth & pcie error counters Signed-off-by: Asad Kamal Reviewed-by: Le Ma Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index dab35d878a90..fef2d290f3f2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -123,7 +123,7 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0x8 +#define SMU_METRICS_TABLE_VERSION 0x9 typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; @@ -211,6 +211,14 @@ typedef struct __attribute__((packed, aligned(4))) { //XGMI Data tranfser size uint64_t XgmiReadDataSizeAcc[8];//in KByte uint64_t XgmiWriteDataSizeAcc[8];//in KByte + + //PCIE BW Data and error count + uint32_t PcieBandwidth[4]; + uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated + uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated + uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated + uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated + uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated } MetricsTable_t; #define SMU_VF_METRICS_TABLE_VERSION 0x3 From e4d0be18243ca006258b5c7c148796c0b43505c4 Mon Sep 17 00:00:00 2001 From: Asad Kamal Date: Tue, 14 Nov 2023 16:17:17 +0800 Subject: [PATCH 202/299] drm/amd/pm: Fill pcie error counters for gpu v1_4 Fill PCIE error counters & instantaneous bandwidth in gpu metrics v1_4 for smu v_13_0_6 Signed-off-by: Asad Kamal Reviewed-by: Le Ma Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 891605d4975f..84fac499cf0c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -2095,6 +2095,14 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table smu_v13_0_6_get_current_pcie_link_speed(smu); gpu_metrics->pcie_bandwidth_acc = SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]); + gpu_metrics->pcie_bandwidth_inst = + SMUQ10_ROUND(metrics->PcieBandwidth[0]); + gpu_metrics->pcie_l0_to_recov_count_acc = + 
metrics->PCIeL0ToRecoveryCountAcc; + gpu_metrics->pcie_replay_count_acc = + metrics->PCIenReplayAAcc; + gpu_metrics->pcie_replay_rover_count_acc = + metrics->PCIenReplayARolloverCountAcc; } gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); From 9725a4f9eb495bfa6c7f5ccdb49440ff06dba0a1 Mon Sep 17 00:00:00 2001 From: Muhammad Ahmed Date: Tue, 31 Oct 2023 16:03:21 -0400 Subject: [PATCH 203/299] drm/amd/display: Add null checks for 8K60 lightup [WHY & HOW] Add some null checks to fix an issue where 8k60 tiled display fails to light up. Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Charlene Liu Acked-by: Alex Hung Signed-off-by: Muhammad Ahmed Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 7b9bf5cb4529..d8f434738212 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3178,7 +3178,7 @@ static bool update_planes_and_stream_state(struct dc *dc, struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx, context->streams[i]); - if (otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) + if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) resource_build_test_pattern_params(&context->res_ctx, otg_master); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 1d48278cba96..a1f1d1003992 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -5190,6 +5190,9 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy( sec_next = sec_pipe->next_odm_pipe; sec_prev = sec_pipe->prev_odm_pipe; + if (pri_pipe == NULL) + return false; + *sec_pipe = *pri_pipe; sec_pipe->top_pipe = sec_top; From b71f4ade1b8900d30c661d6c27f87c35214c398c Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Wed, 8 Nov 2023 13:31:57 -0600 Subject: [PATCH 204/299] drm/amd/display: fix a NULL pointer dereference in amdgpu_dm_i2c_xfer() When ddc_service_construct() is called, it explicitly checks both the link type and whether there is something on the link which will dictate whether the pin is marked as hw_supported. If the pin isn't set or the link is not set (such as from unloading/reloading amdgpu in an IGT test) then fail the amdgpu_dm_i2c_xfer() call. 
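Reduced to its essentials, the transfer path now refuses to run when construction left no usable pin (sketch):

	/* ddc_service_construct() only marks the pin hw_supported for a real link */
	if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
		return -EIO;	/* nothing to talk to, e.g. after unload/reload */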
Cc: stable@vger.kernel.org Fixes: 22676bc500c2 ("drm/amd/display: Fix dmub soft hang for PSR 1") Link: https://github.com/fwupd/fwupd/issues/6327 Signed-off-by: Mario Limonciello Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6f99f6754c11..5ec7acf65ee1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7481,6 +7481,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, int i; int result = -EIO; + if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported) + return result; + cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); if (!cmd.payloads) From 270b301beca58e427a0fda7523a71a9562e644bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Pekkarinen?= Date: Tue, 14 Nov 2023 17:27:51 +0200 Subject: [PATCH 205/299] drm/amd/display: fix NULL dereference MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The following patch will fix a minor issue where a debug message is referencing an struct that has just being checked whether is null or not. This has been noticed by using coccinelle, in the following output: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c:540:25-29: ERROR: aconnector is NULL but dereferenced. Fixes: 5d72e247e58c ("drm/amd/display: switch DC over to the new DRM logging macros") Signed-off-by: José Pekkarinen Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index ed784cf27d39..c7a29bb737e2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -536,11 +536,8 @@ bool dm_helpers_dp_read_dpcd( struct amdgpu_dm_connector *aconnector = link->priv; - if (!aconnector) { - drm_dbg_dp(aconnector->base.dev, - "Failed to find connector for link!\n"); + if (!aconnector) return false; - } return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data, size) == size; From 435f5b369657cffee4b04db1f5805b48599f4dbe Mon Sep 17 00:00:00 2001 From: Tianci Yin Date: Wed, 1 Nov 2023 09:47:13 +0800 Subject: [PATCH 206/299] drm/amd/display: Enable fast plane updates on DCN3.2 and above [WHY] When cursor moves across screen boarder, lag cursor observed, since subvp settings need to sync up with vblank that causes cursor updates being delayed. [HOW] Enable fast plane updates on DCN3.2 to fix it. 
Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Aurabindo Pillai Acked-by: Alex Hung Signed-off-by: Tianci Yin Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 5ec7acf65ee1..9581510d3740 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9606,14 +9606,14 @@ static bool should_reset_plane(struct drm_atomic_state *state, struct drm_plane *other; struct drm_plane_state *old_other_state, *new_other_state; struct drm_crtc_state *new_crtc_state; + struct amdgpu_device *adev = drm_to_adev(plane->dev); int i; /* - * TODO: Remove this hack once the checks below are sufficient - * enough to determine when we need to reset all the planes on - * the stream. + * TODO: Remove this hack for all asics once it proves that the + * fast updates works fine on DCN3.2+. */ - if (state->allow_modeset) + if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset) return true; /* Exit early if we know that we're adding or removing the plane. */ From 923bbfe6c888812db1088d684bd30c24036226d2 Mon Sep 17 00:00:00 2001 From: Paul Hsieh Date: Wed, 25 Oct 2023 10:53:35 +0800 Subject: [PATCH 207/299] drm/amd/display: Clear dpcd_sink_ext_caps if not set [WHY] Some eDP panels' ext caps don't set initial values and the value of dpcd_addr (0x317) is random. It means that sometimes the eDP can be OLED, miniLED and etc, and cause incorrect backlight control interface. [HOW] Add remove_sink_ext_caps to remove sink ext caps (HDR, OLED and etc) Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Anthony Koo Acked-by: Alex Hung Signed-off-by: Paul Hsieh Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_types.h | 1 + drivers/gpu/drm/amd/display/dc/link/link_detection.c | 3 +++ 2 files changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index cea666ea66c6..fcb825e4f1bb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -177,6 +177,7 @@ struct dc_panel_patch { unsigned int disable_fams; unsigned int skip_avmute; unsigned int mst_start_top_delay; + unsigned int remove_sink_ext_caps; }; struct dc_edid_caps { diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index d6f0f857c05a..f2fe523f914f 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -1088,6 +1088,9 @@ static bool detect_link_and_local_sink(struct dc_link *link, if (sink->edid_caps.panel_patch.skip_scdc_overwrite) link->ctx->dc->debug.hdmi20_disable = true; + if (sink->edid_caps.panel_patch.remove_sink_ext_caps) + link->dpcd_sink_ext_caps.raw = 0; + if (dc_is_hdmi_signal(link->connector_signal)) read_scdc_caps(link->ddc, link->local_sink); From 07ee43faeb7eb088e49a7549fcabcae94c443d3b Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 13 Nov 2023 14:34:55 +0800 Subject: [PATCH 208/299] drm/amdgpu: fix ras err_data null pointer issue in amdgpu_ras.c fix ras err_data null pointer issue in amdgpu_ras.c Fixes: 8cc0f5669eb6 ("drm/amdgpu: Support multiple error query modes") Signed-off-by: 
Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 84e5987b14e0..a3dc68e98910 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1188,7 +1188,7 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, } if (block_obj->hw_ops->query_ras_error_count) - block_obj->hw_ops->query_ras_error_count(adev, &err_data); + block_obj->hw_ops->query_ras_error_count(adev, err_data); if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) || (info->head.block == AMDGPU_RAS_BLOCK__GFX) || From 0f216364625cb453b4f933deacfa92df7f2a2fc9 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 10 Nov 2023 13:15:39 +0530 Subject: [PATCH 209/299] drm/amd/pm: Don't send unload message for reset No need to notify about unload during reset. Also remove the FW version check. Signed-off-by: Lijo Lazar Reviewed-by: Yang Wang Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 84fac499cf0c..0e5a77c3c2e2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -1454,7 +1454,7 @@ static int smu_v13_0_6_register_irq_handler(struct smu_context *smu) static int smu_v13_0_6_notify_unload(struct smu_context *smu) { - if (smu->smc_fw_version <= 0x553500) + if (amdgpu_in_reset(smu->adev)) return 0; dev_dbg(smu->adev->dev, "Notify PMFW about driver unload"); From 5e8a0d3598b47ee5a57708072bdef08816264538 Mon Sep 17 00:00:00 2001 From: Duncan Ma Date: Wed, 25 Oct 2023 19:07:21 -0400 Subject: [PATCH 210/299] drm/amd/display: Negate IPS allow and commit bits [WHY] On s0i3, IPS mask isn't saved and restored. It is reset to zero on exit. If it is cleared unexpectedly, driver will proceed operations while DCN is in IPS2 and cause a hang. [HOW] Negate the bit logic. Default value of zero indicates it is still in IPS2. Driver must poll for the bit to assert. 
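With the negated polarity, the wait loop polls for the commit bit to become set rather than cleared, roughly (illustrative sketch of the exit path):

	/* zero now means "still in IPS2"; wait for the commit bit to assert */
	for (i = 0; i < max_num_polls; ++i) {
		commit_state = dc->hwss.get_idle_state(dc);
		if (commit_state & DMUB_IPS2_COMMIT_MASK)
			break;
		udelay(1);
	}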
Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Charlene Liu Acked-by: Alex Hung Signed-off-by: Duncan Ma Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 18 +++++++++--------- drivers/gpu/drm/amd/display/dc/core/dc.c | 4 ++-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 10 +++++----- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 0fa4fcd00de2..507a7cf56711 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -820,22 +820,22 @@ static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle) if (dc->config.disable_ips == DMUB_IPS_ENABLE || dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) { - val |= DMUB_IPS1_ALLOW_MASK; - val |= DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { val = val & ~DMUB_IPS1_ALLOW_MASK; val = val & ~DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { - val |= DMUB_IPS1_ALLOW_MASK; - val = val & ~DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { val |= DMUB_IPS1_ALLOW_MASK; val |= DMUB_IPS2_ALLOW_MASK; + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { + val = val & ~DMUB_IPS1_ALLOW_MASK; + val |= DMUB_IPS2_ALLOW_MASK; + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { + val = val & ~DMUB_IPS1_ALLOW_MASK; + val = val & ~DMUB_IPS2_ALLOW_MASK; } if (!allow_idle) { - val = val & ~DMUB_IPS1_ALLOW_MASK; - val = val & ~DMUB_IPS2_ALLOW_MASK; + val |= DMUB_IPS1_ALLOW_MASK; + val |= DMUB_IPS2_ALLOW_MASK; } dcn35_smu_write_ips_scratch(clk_mgr, val); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index d8f434738212..76b47f178127 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -4934,8 +4934,8 @@ bool dc_dmub_is_ips_idle_state(struct dc *dc) if (dc->hwss.get_idle_state) idle_state = dc->hwss.get_idle_state(dc); - if ((idle_state & DMUB_IPS1_ALLOW_MASK) || - (idle_state & DMUB_IPS2_ALLOW_MASK)) + if (!(idle_state & DMUB_IPS1_ALLOW_MASK) || + !(idle_state & DMUB_IPS2_ALLOW_MASK)) return true; return false; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index e4c007203318..0e07699c1e83 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1202,11 +1202,11 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc) allow_state = dc->hwss.get_idle_state(dc); dc->hwss.set_idle_state(dc, false); - if (allow_state & DMUB_IPS2_ALLOW_MASK) { + if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) { // Wait for evaluation time udelay(dc->debug.ips2_eval_delay_us); commit_state = dc->hwss.get_idle_state(dc); - if (commit_state & DMUB_IPS2_COMMIT_MASK) { + if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) { // Tell PMFW to exit low power state dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); @@ -1216,7 +1216,7 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc) for (i = 0; i < max_num_polls; ++i) { commit_state = dc->hwss.get_idle_state(dc); - if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) + if (commit_state & DMUB_IPS2_COMMIT_MASK) 
break; udelay(1); @@ -1235,10 +1235,10 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc) } dc_dmub_srv_notify_idle(dc, false); - if (allow_state & DMUB_IPS1_ALLOW_MASK) { + if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) { for (i = 0; i < max_num_polls; ++i) { commit_state = dc->hwss.get_idle_state(dc); - if (!(commit_state & DMUB_IPS1_COMMIT_MASK)) + if (commit_state & DMUB_IPS1_COMMIT_MASK) break; udelay(1); From 9ddea8c9775d9379d71e6ac1519c552461b90b07 Mon Sep 17 00:00:00 2001 From: Shiwu Zhang Date: Tue, 31 Oct 2023 11:02:49 +0800 Subject: [PATCH 211/299] drm/amdgpu: add and populate the port num into xgmi topology info The port num info is firstly introduced with 20.00.01.13 xgmi ta and make them as part of topology info. Signed-off-by: Shiwu Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 5 +++++ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 1 + 2 files changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 32b701cc0376..a21045d018f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1473,6 +1473,11 @@ int psp_xgmi_get_topology_info(struct psp_context *psp, topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ? topology->nodes[i].num_links : node_num_links; } + /* popluate the connected port num info if supported and available */ + if (ta_port_num_support && topology->nodes[i].num_links) { + memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num, + sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM); + } /* reflect the topology information for bi-directionality */ if (requires_reflection && topology->nodes[i].num_hops) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 5d36ad3f48c7..c4d9cbde55b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -150,6 +150,7 @@ struct psp_xgmi_node_info { uint8_t is_sharing_enabled; enum ta_xgmi_assigned_sdma_engine sdma_engine; uint8_t num_links; + struct xgmi_connected_port_num port_num[TA_XGMI__MAX_PORT_NUM]; }; struct psp_xgmi_topology_info { From 5911d02cac70d7fb52009fbd37423e63f8f6f9bc Mon Sep 17 00:00:00 2001 From: Lewis Huang Date: Thu, 19 Oct 2023 17:22:21 +0800 Subject: [PATCH 212/299] drm/amd/display: Change the DMCUB mailbox memory location from FB to inbox [WHY] Flush command sent to DMCUB spends more time for execution on a dGPU than on an APU. This causes cursor lag when using high refresh rate mouses. [HOW] 1. Change the DMCUB mailbox memory location from FB to inbox. 2. Only change windows memory to inbox. 
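A caller that does opt in would roughly do the following (hypothetical sketch; the inbox buffer names are placeholders, and this patch itself still passes is_mailbox_in_inbox = false on the existing path):

	region_params.is_mailbox_in_inbox = true;
	dmub_srv_calc_region_info(dmub_srv, &region_params, &region_info);

	memory_params.cpu_fb_addr    = dmub_bo_cpu_addr;	/* FB-backed windows */
	memory_params.gpu_fb_addr    = dmub_bo_gpu_addr;
	memory_params.cpu_inbox_addr = dmub_inbox_cpu_addr;	/* GART-backed mailbox window */
	memory_params.gpu_inbox_addr = dmub_inbox_gpu_addr;
	memory_params.region_info    = &region_info;
	dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);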
Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Nicholas Kazlauskas Acked-by: Alex Hung Signed-off-by: Lewis Huang Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 13 ++++---- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 22 ++++++++----- .../gpu/drm/amd/display/dmub/src/dmub_srv.c | 32 ++++++++++++++----- 3 files changed, 45 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 9581510d3740..ee97814ebd99 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2079,7 +2079,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) struct dmub_srv_create_params create_params; struct dmub_srv_region_params region_params; struct dmub_srv_region_info region_info; - struct dmub_srv_fb_params fb_params; + struct dmub_srv_memory_params memory_params; struct dmub_srv_fb_info *fb_info; struct dmub_srv *dmub_srv; const struct dmcub_firmware_header_v1_0 *hdr; @@ -2182,6 +2182,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) adev->dm.dmub_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + PSP_HEADER_BYTES; + region_params.is_mailbox_in_inbox = false; status = dmub_srv_calc_region_info(dmub_srv, ®ion_params, ®ion_info); @@ -2205,10 +2206,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) return r; /* Rebase the regions on the framebuffer address. */ - memset(&fb_params, 0, sizeof(fb_params)); - fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; - fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; - fb_params.region_info = ®ion_info; + memset(&memory_params, 0, sizeof(memory_params)); + memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr; + memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr; + memory_params.region_info = ®ion_info; adev->dm.dmub_fb_info = kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); @@ -2220,7 +2221,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) return -ENOMEM; } - status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info); + status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error calculating DMUB FB info: %d\n", status); return -EINVAL; diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 9665ada0f894..df63aa8f01e9 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -195,6 +195,7 @@ struct dmub_srv_region_params { uint32_t vbios_size; const uint8_t *fw_inst_const; const uint8_t *fw_bss_data; + bool is_mailbox_in_inbox; }; /** @@ -214,20 +215,25 @@ struct dmub_srv_region_params { */ struct dmub_srv_region_info { uint32_t fb_size; + uint32_t inbox_size; uint8_t num_regions; struct dmub_region regions[DMUB_WINDOW_TOTAL]; }; /** - * struct dmub_srv_fb_params - parameters used for driver fb setup + * struct dmub_srv_memory_params - parameters used for driver fb setup * @region_info: region info calculated by dmub service - * @cpu_addr: base cpu address for the framebuffer - * @gpu_addr: base gpu virtual address for the framebuffer + * @cpu_fb_addr: base cpu address for the framebuffer + * @cpu_inbox_addr: base cpu address for the gart + * @gpu_fb_addr: base gpu virtual address for the framebuffer + * @gpu_inbox_addr: base gpu virtual address for the gart */ -struct dmub_srv_fb_params { +struct 
dmub_srv_memory_params { const struct dmub_srv_region_info *region_info; - void *cpu_addr; - uint64_t gpu_addr; + void *cpu_fb_addr; + void *cpu_inbox_addr; + uint64_t gpu_fb_addr; + uint64_t gpu_inbox_addr; }; /** @@ -563,8 +569,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, * DMUB_STATUS_OK - success * DMUB_STATUS_INVALID - unspecified error */ -enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, - const struct dmub_srv_fb_params *params, +enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub, + const struct dmub_srv_memory_params *params, struct dmub_srv_fb_info *out); /** diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 5d36f3e5dc2b..22fc4ba96def 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -434,7 +434,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, uint32_t fw_state_size = DMUB_FW_STATE_SIZE; uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE; uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE; - + uint32_t previous_top = 0; if (!dmub->sw_init) return DMUB_STATUS_INVALID; @@ -459,8 +459,15 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, bios->base = dmub_align(stack->top, 256); bios->top = bios->base + params->vbios_size; - mail->base = dmub_align(bios->top, 256); - mail->top = mail->base + DMUB_MAILBOX_SIZE; + if (params->is_mailbox_in_inbox) { + mail->base = 0; + mail->top = mail->base + DMUB_MAILBOX_SIZE; + previous_top = bios->top; + } else { + mail->base = dmub_align(bios->top, 256); + mail->top = mail->base + DMUB_MAILBOX_SIZE; + previous_top = mail->top; + } fw_info = dmub_get_fw_meta_info(params); @@ -479,7 +486,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, dmub->fw_version = fw_info->fw_version; } - trace_buff->base = dmub_align(mail->top, 256); + trace_buff->base = dmub_align(previous_top, 256); trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64); fw_state->base = dmub_align(trace_buff->top, 256); @@ -490,11 +497,14 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, out->fb_size = dmub_align(scratch_mem->top, 4096); + if (params->is_mailbox_in_inbox) + out->inbox_size = dmub_align(mail->top, 4096); + return DMUB_STATUS_OK; } -enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, - const struct dmub_srv_fb_params *params, +enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub, + const struct dmub_srv_memory_params *params, struct dmub_srv_fb_info *out) { uint8_t *cpu_base; @@ -509,8 +519,8 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, if (params->region_info->num_regions != DMUB_NUM_WINDOWS) return DMUB_STATUS_INVALID; - cpu_base = (uint8_t *)params->cpu_addr; - gpu_base = params->gpu_addr; + cpu_base = (uint8_t *)params->cpu_fb_addr; + gpu_base = params->gpu_fb_addr; for (i = 0; i < DMUB_NUM_WINDOWS; ++i) { const struct dmub_region *reg = @@ -518,6 +528,12 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, out->fb[i].cpu_addr = cpu_base + reg->base; out->fb[i].gpu_addr = gpu_base + reg->base; + + if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) { + out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base; + out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base; + } + out->fb[i].size = reg->top - reg->base; } From 0ee057e66c4b782809a0a9265cdac5542e646706 Mon Sep 17 00:00:00 2001 From: Nicholas Susanto Date: Wed, 1 Nov 2023 15:30:10 -0400 Subject: [PATCH 213/299] drm/amd/display: Fix encoder disable 
logic [WHY] DENTIST hangs when OTG is off and encoder is on. We were not disabling the encoder properly when switching from extended mode to external monitor only. [HOW] Disable the encoder using an existing enable/disable fifo helper instead of enc35_stream_encoder_enable. Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Nicholas Kazlauskas Acked-by: Alex Hung Signed-off-by: Nicholas Susanto Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/dc/dcn35/dcn35_dio_stream_encoder.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c index 001f9eb66920..62a8f0b56006 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c @@ -261,12 +261,6 @@ static void enc35_stream_encoder_enable( /* invalid mode ! */ ASSERT_CRITICAL(false); } - - REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 1); - REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 1); - } else { - REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 0); - REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 0); } } @@ -436,6 +430,8 @@ static void enc35_disable_fifo(struct stream_encoder *enc) struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0); + REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 0); + REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 0); } static void enc35_enable_fifo(struct stream_encoder *enc) @@ -443,6 +439,8 @@ static void enc35_enable_fifo(struct stream_encoder *enc) struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7); + REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 1); + REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 1); enc35_reset_fifo(enc, true); enc35_reset_fifo(enc, false); From 564ca1b53ece166b5915c2ac90f3e9313100f4ea Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 14 Nov 2023 11:36:56 -0500 Subject: [PATCH 214/299] drm/amdgpu/gmc11: fix logic typo in AGP check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Should be && rather than ||. Fixes: b2e1cbe6281f ("drm/amdgpu/gmc11: disable AGP on GC 11.5") Acked-by: Christian König Reviewed-by: Mario Limonciello Tested-by: Mario Limonciello # PHX & Navi33 Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 6dce9b29f675..ba4c82f5e617 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -640,7 +640,7 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev, amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, &adev->gmc, base); amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH); - if (!amdgpu_sriov_vf(adev) || + if (!amdgpu_sriov_vf(adev) && (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0))) amdgpu_gmc_agp_location(adev, mc); From 6ba5b613837c5d997ad8297b22fc46cd0be58d76 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 Nov 2023 15:31:00 -0500 Subject: [PATCH 215/299] drm/amdgpu: add a module parameter to control the AGP aperture MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a module parameter to control the AGP aperture. 
The AGP aperture is an aperture in the GPU's internal address space which provides direct non-paged access to the platform address space. This access is non-snooped so only uncached memory can be accessed. Add a knob so that we can toggle this for debugging. Fixes: 67318cb84341 ("drm/amdgpu/gmc11: set gart placement GC11") Acked-by: Christian König Reviewed-by: Mario Limonciello Tested-by: Mario Limonciello # PHX & Navi33 Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 10 ++++++++++ drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 3 ++- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- 5 files changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index afec09930efa..9d92ca157677 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -248,6 +248,7 @@ extern int amdgpu_umsch_mm; extern int amdgpu_seamless; extern int amdgpu_user_partt_mode; +extern int amdgpu_agp; #define AMDGPU_VM_MAX_NUM_CTX 4096 #define AMDGPU_SG_THRESHOLD (256*1024*1024) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 3095a3a864af..8f24cabe2155 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -207,6 +207,7 @@ int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE; int amdgpu_umsch_mm; int amdgpu_seamless = -1; /* auto */ uint amdgpu_debug_mask; +int amdgpu_agp = -1; /* auto */ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); @@ -961,6 +962,15 @@ module_param_named(seamless, amdgpu_seamless, int, 0444); MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default"); module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444); +/** + * DOC: agp (int) + * Enable the AGP aperture. This provides an aperture in the GPU's internal + * address space for direct access to system memory. Note that these accesses + * are non-snooped, so they are only used for access to uncached memory. + */ +MODULE_PARM_DESC(agp, "AGP (-1 = auto (default), 0 = disable, 1 = enable)"); +module_param_named(agp, amdgpu_agp, int, 0444); + /* These devices are not supported by amdgpu. 
* They are supported by the mach64, r128, radeon drivers */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 0ec7b061d7c2..23483bffa1c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -675,7 +675,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, &adev->gmc, base); amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); - if (!amdgpu_sriov_vf(adev)) + if (!amdgpu_sriov_vf(adev) && (amdgpu_agp != 0)) amdgpu_gmc_agp_location(adev, mc); /* base offset of vram pages */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index ba4c82f5e617..e1078b53e942 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -641,7 +641,8 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev, amdgpu_gmc_vram_location(adev, &adev->gmc, base); amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH); if (!amdgpu_sriov_vf(adev) && - (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0))) + (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)) && + (amdgpu_agp != 0)) amdgpu_gmc_agp_location(adev, mc); /* base offset of vram pages */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c1f2f166f064..1638c3177799 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1630,7 +1630,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, } else { amdgpu_gmc_vram_location(adev, mc, base); amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); - if (!amdgpu_sriov_vf(adev)) + if (!amdgpu_sriov_vf(adev) && (amdgpu_agp != 0)) amdgpu_gmc_agp_location(adev, mc); } /* base offset of vram pages */ From 0db062eac3e0846c6f120867a79df83b4c3db46f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 Nov 2023 15:34:19 -0500 Subject: [PATCH 216/299] drm/amdgpu/gmc11: disable AGP aperture MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We've had misc reports of random IOMMU page faults when this is used. It's just a rarely used optimization anyway, so let's just disable it. It can still be toggled via the module parameter for testing. 
v2: leave it configurable via module parameter Fixes: 67318cb84341 ("drm/amdgpu/gmc11: set gart placement GC11") Reviewed-by: Yang Wang (v1) Acked-by: Christian König Reviewed-by: Mario Limonciello Tested-by: Mario Limonciello # PHX & Navi33 Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index e1078b53e942..23d7b548d13f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -642,7 +642,7 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev, amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH); if (!amdgpu_sriov_vf(adev) && (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)) && - (amdgpu_agp != 0)) + (amdgpu_agp == 1)) amdgpu_gmc_agp_location(adev, mc); /* base offset of vram pages */ From 61fc93695bbfde218d5f9f0b8051ce36eb649669 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 Nov 2023 15:38:54 -0500 Subject: [PATCH 217/299] drm/amdgpu/gmc10: disable AGP aperture MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We've had misc reports of random IOMMU page faults when this is used. It's just a rarely used optimization anyway, so let's just disable it. It can still be toggled via the module parameter for testing. v2: leave it configurable via module parameter Reviewed-by: Yang Wang (v1) Acked-by: Christian König Reviewed-by: Mario Limonciello Tested-by: Mario Limonciello # PHX & Navi33 Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 23483bffa1c7..a5a05c16c10d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -675,7 +675,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, amdgpu_gmc_set_agp_default(adev, mc); amdgpu_gmc_vram_location(adev, &adev->gmc, base); amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); - if (!amdgpu_sriov_vf(adev) && (amdgpu_agp != 0)) + if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1)) amdgpu_gmc_agp_location(adev, mc); /* base offset of vram pages */ From e8c2d3e25b844ad8f7c8b269a7cfd65285329264 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 Nov 2023 15:40:00 -0500 Subject: [PATCH 218/299] drm/amdgpu/gmc9: disable AGP aperture MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We've had misc reports of random IOMMU page faults when this is used. It's just a rarely used optimization anyway, so let's just disable it. It can still be toggled via the module parameter for testing. 
v2: leave it configurable via module parameter Reviewed-by: Yang Wang (v1) Acked-by: Christian König Reviewed-by: Mario Limonciello Tested-by: Mario Limonciello # PHX & Navi33 Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 1638c3177799..2ac5820e9c92 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1630,7 +1630,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, } else { amdgpu_gmc_vram_location(adev, mc, base); amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); - if (!amdgpu_sriov_vf(adev) && (amdgpu_agp != 0)) + if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1)) amdgpu_gmc_agp_location(adev, mc); } /* base offset of vram pages */ From c6ea14d557343cd3af6c6be2f5a78c98bdb281bb Mon Sep 17 00:00:00 2001 From: Shyam Sundar S K Date: Thu, 16 Nov 2023 22:31:21 +0530 Subject: [PATCH 219/299] platform/x86/amd/pmc: adjust getting DRAM size behavior MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit amd_pmc_get_dram_size() is used to get the DRAM size information. But in the current code, mailbox command to get the DRAM size info is sent based on the values of dev->major and dev->minor. But dev->major and dev->minor will have either junk or zero assigned to them until at least once a call to amd_pmc_get_smu_version() is made which ideally populates dev->major and dev->minor. However, adding a amd_pmc_get_smu_version() call to amd_pmc_get_dram_size() has a downside of elevating the boot times. After talking to the PMFW team, it's understood that the "get dram size" mbox command would only be supported on specific platforms (like Mendocino) and not all. So, adjust getting DRAM size behavior such that, - if running on Rembrandt or Mendocino and the underlying PMFW knows how to execute the "get dram size" command it shall give the custom dram size. - if the underlying FW does not report the dram size, we just proceed further and assign the default dram size. The simplest way to address this is to remove amd_pmc_get_dram_size() function and directly call the "get dram size" command in the amd_pmc_s2d_init(). 
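The resulting flow is a single attempt with a default fallback (sketch):

	/* Get DRAM size; not every PMFW implements this command */
	ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
	if (ret || !dev->dram_size)
		dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;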
Reported-by: Mark Hasemeyer Fixes: be8325fb3d8c ("platform/x86/amd: pmc: Get STB DRAM size from PMFW") Cc: stable@vger.kernel.org Suggested-by: Sanket Goswami Signed-off-by: Shyam Sundar S K Link: https://lore.kernel.org/r/20231116170121.3372222-1-Shyam-sundar.S-k@amd.com Reviewed-by: Mario Limonciello Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen --- drivers/platform/x86/amd/pmc/pmc.c | 31 ++---------------------------- 1 file changed, 2 insertions(+), 29 deletions(-) diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c index cd6ac04c1468..c3104714b480 100644 --- a/drivers/platform/x86/amd/pmc/pmc.c +++ b/drivers/platform/x86/amd/pmc/pmc.c @@ -964,33 +964,6 @@ static const struct pci_device_id pmc_pci_ids[] = { { } }; -static int amd_pmc_get_dram_size(struct amd_pmc_dev *dev) -{ - int ret; - - switch (dev->cpu_id) { - case AMD_CPU_ID_YC: - if (!(dev->major > 90 || (dev->major == 90 && dev->minor > 39))) { - ret = -EINVAL; - goto err_dram_size; - } - break; - default: - ret = -EINVAL; - goto err_dram_size; - } - - ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true); - if (ret || !dev->dram_size) - goto err_dram_size; - - return 0; - -err_dram_size: - dev_err(dev->dev, "DRAM size command not supported for this platform\n"); - return ret; -} - static int amd_pmc_s2d_init(struct amd_pmc_dev *dev) { u32 phys_addr_low, phys_addr_hi; @@ -1009,8 +982,8 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev) return -EIO; /* Get DRAM size */ - ret = amd_pmc_get_dram_size(dev); - if (ret) + ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true); + if (ret || !dev->dram_size) dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX; /* Get STB DRAM address */ From 914fa861e3d7803c9bbafc229652c2a69edb8b60 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Thu, 9 Nov 2023 19:18:22 +0800 Subject: [PATCH 220/299] erofs: simplify erofs_read_inode() After commit 1c7f49a76773 ("erofs: tidy up EROFS on-disk naming"), there is a unique `union erofs_inode_i_u` so that we could parse the union directly. Besides, it also replaces `inode->i_sb` with `sb` for simplicity. 
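The effect is that both on-disk layouts feed one `union erofs_inode_i_u`, so the mode-dependent fields are decoded exactly once (abridged sketch):

	union erofs_inode_i_u iu;	/* copied from either the compact or extended inode */

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		vi->raw_blkaddr = le32_to_cpu(iu.raw_blkaddr);
		break;
	case S_IFCHR:
	case S_IFBLK:
		inode->i_rdev = new_decode_dev(le32_to_cpu(iu.rdev));
		break;
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_rdev = 0;
		break;
	default:
		err = -EFSCORRUPTED;
		goto err_out;
	}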
Signed-off-by: Ferry Meng Reviewed-by: Gao Xiang Reviewed-by: Yue Hu Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20231109111822.17944-1-mengferry@linux.alibaba.com Signed-off-by: Gao Xiang --- fs/erofs/inode.c | 98 +++++++++++++++++------------------------------- 1 file changed, 35 insertions(+), 63 deletions(-) diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index b8ad05b4509d..14a79d3226ab 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -15,11 +15,11 @@ static void *erofs_read_inode(struct erofs_buf *buf, struct erofs_sb_info *sbi = EROFS_SB(sb); struct erofs_inode *vi = EROFS_I(inode); const erofs_off_t inode_loc = erofs_iloc(inode); - erofs_blk_t blkaddr, nblks = 0; void *kaddr; struct erofs_inode_compact *dic; struct erofs_inode_extended *die, *copied = NULL; + union erofs_inode_i_u iu; unsigned int ifmt; int err; @@ -35,9 +35,8 @@ static void *erofs_read_inode(struct erofs_buf *buf, dic = kaddr + *ofs; ifmt = le16_to_cpu(dic->i_format); - if (ifmt & ~EROFS_I_ALL) { - erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu", + erofs_err(sb, "unsupported i_format %u of nid %llu", ifmt, vi->nid); err = -EOPNOTSUPP; goto err_out; @@ -45,7 +44,7 @@ static void *erofs_read_inode(struct erofs_buf *buf, vi->datalayout = erofs_inode_datalayout(ifmt); if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) { - erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu", + erofs_err(sb, "unsupported datalayout %u of nid %llu", vi->datalayout, vi->nid); err = -EOPNOTSUPP; goto err_out; @@ -82,40 +81,15 @@ static void *erofs_read_inode(struct erofs_buf *buf, vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount); inode->i_mode = le16_to_cpu(die->i_mode); - switch (inode->i_mode & S_IFMT) { - case S_IFREG: - case S_IFDIR: - case S_IFLNK: - vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr); - break; - case S_IFCHR: - case S_IFBLK: - inode->i_rdev = - new_decode_dev(le32_to_cpu(die->i_u.rdev)); - break; - case S_IFIFO: - case S_IFSOCK: - inode->i_rdev = 0; - break; - default: - goto bogusimode; - } + iu = die->i_u; i_uid_write(inode, le32_to_cpu(die->i_uid)); i_gid_write(inode, le32_to_cpu(die->i_gid)); set_nlink(inode, le32_to_cpu(die->i_nlink)); - - /* extended inode has its own timestamp */ + /* each extended inode has its own timestamp */ inode_set_ctime(inode, le64_to_cpu(die->i_mtime), le32_to_cpu(die->i_mtime_nsec)); inode->i_size = le64_to_cpu(die->i_size); - - /* total blocks for compressed files */ - if (erofs_inode_is_data_compressed(vi->datalayout)) - nblks = le32_to_cpu(die->i_u.compressed_blocks); - else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) - /* fill chunked inode summary info */ - vi->chunkformat = le16_to_cpu(die->i_u.c.format); kfree(copied); copied = NULL; break; @@ -125,49 +99,51 @@ static void *erofs_read_inode(struct erofs_buf *buf, vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount); inode->i_mode = le16_to_cpu(dic->i_mode); - switch (inode->i_mode & S_IFMT) { - case S_IFREG: - case S_IFDIR: - case S_IFLNK: - vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr); - break; - case S_IFCHR: - case S_IFBLK: - inode->i_rdev = - new_decode_dev(le32_to_cpu(dic->i_u.rdev)); - break; - case S_IFIFO: - case S_IFSOCK: - inode->i_rdev = 0; - break; - default: - goto bogusimode; - } + iu = dic->i_u; i_uid_write(inode, le16_to_cpu(dic->i_uid)); i_gid_write(inode, le16_to_cpu(dic->i_gid)); set_nlink(inode, le16_to_cpu(dic->i_nlink)); - /* use build time for compact inodes */ inode_set_ctime(inode, sbi->build_time, sbi->build_time_nsec); 
inode->i_size = le32_to_cpu(dic->i_size); - if (erofs_inode_is_data_compressed(vi->datalayout)) - nblks = le32_to_cpu(dic->i_u.compressed_blocks); - else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) - vi->chunkformat = le16_to_cpu(dic->i_u.c.format); break; default: - erofs_err(inode->i_sb, - "unsupported on-disk inode version %u of nid %llu", + erofs_err(sb, "unsupported on-disk inode version %u of nid %llu", erofs_inode_version(ifmt), vi->nid); err = -EOPNOTSUPP; goto err_out; } - if (vi->datalayout == EROFS_INODE_CHUNK_BASED) { + switch (inode->i_mode & S_IFMT) { + case S_IFREG: + case S_IFDIR: + case S_IFLNK: + vi->raw_blkaddr = le32_to_cpu(iu.raw_blkaddr); + break; + case S_IFCHR: + case S_IFBLK: + inode->i_rdev = new_decode_dev(le32_to_cpu(iu.rdev)); + break; + case S_IFIFO: + case S_IFSOCK: + inode->i_rdev = 0; + break; + default: + erofs_err(sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode, + vi->nid); + err = -EFSCORRUPTED; + goto err_out; + } + + /* total blocks for compressed files */ + if (erofs_inode_is_data_compressed(vi->datalayout)) { + nblks = le32_to_cpu(iu.compressed_blocks); + } else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) { + /* fill chunked inode summary info */ + vi->chunkformat = le16_to_cpu(iu.c.format); if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) { - erofs_err(inode->i_sb, - "unsupported chunk format %x of nid %llu", + erofs_err(sb, "unsupported chunk format %x of nid %llu", vi->chunkformat, vi->nid); err = -EOPNOTSUPP; goto err_out; @@ -191,10 +167,6 @@ static void *erofs_read_inode(struct erofs_buf *buf, inode->i_blocks = nblks << (sb->s_blocksize_bits - 9); return kaddr; -bogusimode: - erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu", - inode->i_mode, vi->nid); - err = -EFSCORRUPTED; err_out: DBG_BUGON(1); kfree(copied); From 8bd90b6ae7856dd5000b75691d905b39b9ea5d6b Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Tue, 14 Nov 2023 15:07:04 +0800 Subject: [PATCH 221/299] erofs: fix NULL dereference of dif->bdev_handle in fscache mode Avoid NULL dereference of dif->bdev_handle, as dif->bdev_handle is NULL in fscache mode. BUG: kernel NULL pointer dereference, address: 0000000000000000 RIP: 0010:erofs_map_dev+0xbd/0x1c0 Call Trace: erofs_fscache_data_read_slice+0xa7/0x340 erofs_fscache_data_read+0x11/0x30 erofs_fscache_readahead+0xd9/0x100 read_pages+0x47/0x1f0 page_cache_ra_order+0x1e5/0x270 filemap_get_pages+0xf2/0x5f0 filemap_read+0xb8/0x2e0 vfs_read+0x18d/0x2b0 ksys_read+0x53/0xd0 do_syscall_64+0x42/0xf0 entry_SYSCALL_64_after_hwframe+0x6e/0x76 Reported-by: Yiqun Leng Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=7245 Fixes: 49845720080d ("erofs: Convert to use bdev_open_by_path()") Signed-off-by: Jingbo Xu Reviewed-by: Gao Xiang Reviewed-by: Yue Hu Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20231114070704.23398-1-jefflexu@linux.alibaba.com Signed-off-by: Gao Xiang --- fs/erofs/data.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 029c761670bf..c98aeda8abb2 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -220,7 +220,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map) up_read(&devs->rwsem); return 0; } - map->m_bdev = dif->bdev_handle->bdev; + map->m_bdev = dif->bdev_handle ? 
dif->bdev_handle->bdev : NULL; map->m_daxdev = dif->dax_dev; map->m_dax_part_off = dif->dax_part_off; map->m_fscache = dif->fscache; @@ -238,7 +238,8 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map) if (map->m_pa >= startoff && map->m_pa < startoff + length) { map->m_pa -= startoff; - map->m_bdev = dif->bdev_handle->bdev; + map->m_bdev = dif->bdev_handle ? + dif->bdev_handle->bdev : NULL; map->m_daxdev = dif->dax_dev; map->m_dax_part_off = dif->dax_part_off; map->m_fscache = dif->fscache; From 62b241efff99fc4d88a86f1c67c7516e31f432a3 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 17 Nov 2023 16:53:29 +0800 Subject: [PATCH 222/299] MAINTAINERS: erofs: add EROFS webpage Add a new `W:` field of the EROFS entry points to the documentation site at . In addition, update the in-tree documentation and Kconfig too. Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20231117085329.1624223-1-hsiangkao@linux.alibaba.com --- Documentation/filesystems/erofs.rst | 4 ++++ MAINTAINERS | 1 + fs/erofs/Kconfig | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/Documentation/filesystems/erofs.rst b/Documentation/filesystems/erofs.rst index 57c6ae23b3fc..cc4626d6ee4f 100644 --- a/Documentation/filesystems/erofs.rst +++ b/Documentation/filesystems/erofs.rst @@ -91,6 +91,10 @@ compatibility checking tool (fsck.erofs), and a debugging tool (dump.erofs): - git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git +For more information, please also refer to the documentation site: + +- https://erofs.docs.kernel.org + Bugs and patches are welcome, please kindly help us and send to the following linux-erofs mailing list: diff --git a/MAINTAINERS b/MAINTAINERS index 97f51d5ec1cf..cf39d16ad22a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7855,6 +7855,7 @@ R: Yue Hu R: Jeffle Xu L: linux-erofs@lists.ozlabs.org S: Maintained +W: https://erofs.docs.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git F: Documentation/ABI/testing/sysfs-fs-erofs F: Documentation/filesystems/erofs.rst diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig index e540648dedc2..1d318f85232d 100644 --- a/fs/erofs/Kconfig +++ b/fs/erofs/Kconfig @@ -21,7 +21,7 @@ config EROFS_FS performance under extremely memory pressure without extra cost. See the documentation at - for more details. + and the web pages at for more details. If unsure, say N. From 6fc45b6ed921dc00dfb264dc08c7d67ee63d2656 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 17 Nov 2023 18:21:14 +0100 Subject: [PATCH 223/299] dm-delay: fix a race between delay_presuspend and delay_bio In delay_presuspend, we set the atomic variable may_delay and then stop the timer and flush pending bios. The intention here is to prevent the delay target from re-arming the timer again. However, this test is racy. Suppose that one thread goes to delay_bio, sees that dc->may_delay is one and proceeds; now, another thread executes delay_presuspend, it sets dc->may_delay to zero, deletes the timer and flushes pending bios. Then, the first thread continues and adds the bio to delayed->list despite the fact that dc->may_delay is false. Fix this bug by changing may_delay's type from atomic_t to bool and only access it while holding the delayed_bios_lock mutex. Note that we don't have to grab the mutex in delay_resume because there are no bios in flight at this point. 
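With the flag checked under the same mutex that protects the list, the window described above is closed (simplified sketch of the fixed delay_bio path):

	mutex_lock(&delayed_bios_lock);
	if (unlikely(!dc->may_delay)) {		/* presuspend already ran */
		mutex_unlock(&delayed_bios_lock);
		return DM_MAPIO_REMAPPED;	/* submit immediately, do not queue */
	}
	list_add_tail(&delayed->list, &dc->delayed_bios);
	mutex_unlock(&delayed_bios_lock);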
Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org Signed-off-by: Mike Snitzer --- drivers/md/dm-delay.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index efd510984e25..2d6b900e4353 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -33,7 +33,7 @@ struct delay_c { struct work_struct flush_expired_bios; struct list_head delayed_bios; struct task_struct *worker; - atomic_t may_delay; + bool may_delay; struct delay_class read; struct delay_class write; @@ -236,7 +236,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->private = dc; INIT_LIST_HEAD(&dc->delayed_bios); - atomic_set(&dc->may_delay, 1); + dc->may_delay = true; dc->argc = argc; ret = delay_class_ctr(ti, &dc->read, argv); @@ -312,7 +312,7 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio) struct dm_delay_info *delayed; unsigned long expires = 0; - if (!c->delay || !atomic_read(&dc->may_delay)) + if (!c->delay) return DM_MAPIO_REMAPPED; delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info)); @@ -321,6 +321,10 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio) delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay); mutex_lock(&delayed_bios_lock); + if (unlikely(!dc->may_delay)) { + mutex_unlock(&delayed_bios_lock); + return DM_MAPIO_REMAPPED; + } c->ops++; list_add_tail(&delayed->list, &dc->delayed_bios); mutex_unlock(&delayed_bios_lock); @@ -337,7 +341,9 @@ static void delay_presuspend(struct dm_target *ti) { struct delay_c *dc = ti->private; - atomic_set(&dc->may_delay, 0); + mutex_lock(&delayed_bios_lock); + dc->may_delay = false; + mutex_unlock(&delayed_bios_lock); if (delay_is_fast(dc)) flush_delayed_bios_fast(dc, true); @@ -351,7 +357,7 @@ static void delay_resume(struct dm_target *ti) { struct delay_c *dc = ti->private; - atomic_set(&dc->may_delay, 1); + dc->may_delay = true; } static int delay_map(struct dm_target *ti, struct bio *bio) From 38cfff568169ff9f99784948f79f62ca1af5a187 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 17 Nov 2023 18:22:47 +0100 Subject: [PATCH 224/299] dm-delay: fix bugs introduced by kthread mode This commit fixes the following bugs introduced by commit 70bbeb29fab0 ("dm delay: for short delays, use kthread instead of timers and wq"): * the function flush_worker_fn has no exit path - on unload, this function will just loop and consume 100% CPU without any progress * the wake-up mechanism in flush_worker_fn is racy - a wake up will be missed if the process adds entries to the delayed_bios list just before set_current_state(TASK_INTERRUPTIBLE) * flush_delayed_bios_fast submits a bio while holding a global mutex; this may deadlock if we have multiple stacked dm-delay devices and the underlying device attempts to acquire the mutex too * if the target constructor fails, it will call delay_dtr. delay_dtr would attempt to free dc->timer_lock without it being initialized by the constructor. * if the target constructor's kthread allocation fails, delay_dtr would crash trying to dereference dc->worker because it is non-NULL due to ERR_PTR. 
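Of these, the lost wake-up is the classic sleeping-kthread ordering problem:
the emptiness check and the switch to TASK_INTERRUPTIBLE must both happen
before delayed_bios_lock is dropped. A minimal sketch of the corrected loop,
mirroring the flush_worker_fn() change below:

    while (!kthread_should_stop()) {
            flush_delayed_bios_fast(dc, false);
            mutex_lock(&delayed_bios_lock);
            if (list_empty(&dc->delayed_bios)) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    mutex_unlock(&delayed_bios_lock);
                    schedule();
            } else {
                    mutex_unlock(&delayed_bios_lock);
                    cond_resched();
            }
    }

Because a producer adds to the list under the same mutex and only then wakes
the worker, a wake-up can no longer slip in between the emptiness check and
schedule().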
Fixes: 70bbeb29fab0 ("dm delay: for short delays, use kthread instead of timers and wq") Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer --- drivers/md/dm-delay.c | 101 +++++++++++++++++++++++------------------- 1 file changed, 55 insertions(+), 46 deletions(-) diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 2d6b900e4353..00c09d9fffeb 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -73,39 +73,6 @@ static inline bool delay_is_fast(struct delay_c *dc) return !!dc->worker; } -static void flush_delayed_bios_fast(struct delay_c *dc, bool flush_all) -{ - struct dm_delay_info *delayed, *next; - - mutex_lock(&delayed_bios_lock); - list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) { - if (flush_all || time_after_eq(jiffies, delayed->expires)) { - struct bio *bio = dm_bio_from_per_bio_data(delayed, - sizeof(struct dm_delay_info)); - list_del(&delayed->list); - dm_submit_bio_remap(bio, NULL); - delayed->class->ops--; - } - } - mutex_unlock(&delayed_bios_lock); -} - -static int flush_worker_fn(void *data) -{ - struct delay_c *dc = data; - - while (1) { - flush_delayed_bios_fast(dc, false); - if (unlikely(list_empty(&dc->delayed_bios))) { - set_current_state(TASK_INTERRUPTIBLE); - schedule(); - } else - cond_resched(); - } - - return 0; -} - static void flush_bios(struct bio *bio) { struct bio *n; @@ -118,12 +85,11 @@ static void flush_bios(struct bio *bio) } } -static struct bio *flush_delayed_bios(struct delay_c *dc, bool flush_all) +static void flush_delayed_bios_fast(struct delay_c *dc, bool flush_all) { struct dm_delay_info *delayed, *next; - unsigned long next_expires = 0; - unsigned long start_timer = 0; - struct bio_list flush_bios = { }; + struct bio_list flush_bio_list; + bio_list_init(&flush_bio_list); mutex_lock(&delayed_bios_lock); list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) { @@ -131,7 +97,50 @@ static struct bio *flush_delayed_bios(struct delay_c *dc, bool flush_all) struct bio *bio = dm_bio_from_per_bio_data(delayed, sizeof(struct dm_delay_info)); list_del(&delayed->list); - bio_list_add(&flush_bios, bio); + bio_list_add(&flush_bio_list, bio); + delayed->class->ops--; + } + } + mutex_unlock(&delayed_bios_lock); + + flush_bios(bio_list_get(&flush_bio_list)); +} + +static int flush_worker_fn(void *data) +{ + struct delay_c *dc = data; + + while (!kthread_should_stop()) { + flush_delayed_bios_fast(dc, false); + mutex_lock(&delayed_bios_lock); + if (unlikely(list_empty(&dc->delayed_bios))) { + set_current_state(TASK_INTERRUPTIBLE); + mutex_unlock(&delayed_bios_lock); + schedule(); + } else { + mutex_unlock(&delayed_bios_lock); + cond_resched(); + } + } + + return 0; +} + +static void flush_delayed_bios(struct delay_c *dc, bool flush_all) +{ + struct dm_delay_info *delayed, *next; + unsigned long next_expires = 0; + unsigned long start_timer = 0; + struct bio_list flush_bio_list; + bio_list_init(&flush_bio_list); + + mutex_lock(&delayed_bios_lock); + list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) { + if (flush_all || time_after_eq(jiffies, delayed->expires)) { + struct bio *bio = dm_bio_from_per_bio_data(delayed, + sizeof(struct dm_delay_info)); + list_del(&delayed->list); + bio_list_add(&flush_bio_list, bio); delayed->class->ops--; continue; } @@ -147,7 +156,7 @@ static struct bio *flush_delayed_bios(struct delay_c *dc, bool flush_all) if (start_timer) queue_timeout(dc, next_expires); - return bio_list_get(&flush_bios); + flush_bios(bio_list_get(&flush_bio_list)); } static void 
flush_expired_bios(struct work_struct *work) @@ -158,7 +167,7 @@ static void flush_expired_bios(struct work_struct *work) if (delay_is_fast(dc)) flush_delayed_bios_fast(dc, false); else - flush_bios(flush_delayed_bios(dc, false)); + flush_delayed_bios(dc, false); } static void delay_dtr(struct dm_target *ti) @@ -177,8 +186,7 @@ static void delay_dtr(struct dm_target *ti) if (dc->worker) kthread_stop(dc->worker); - if (!delay_is_fast(dc)) - mutex_destroy(&dc->timer_lock); + mutex_destroy(&dc->timer_lock); kfree(dc); } @@ -236,6 +244,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->private = dc; INIT_LIST_HEAD(&dc->delayed_bios); + mutex_init(&dc->timer_lock); dc->may_delay = true; dc->argc = argc; @@ -282,12 +291,12 @@ out: "dm-delay-flush-worker"); if (IS_ERR(dc->worker)) { ret = PTR_ERR(dc->worker); + dc->worker = NULL; goto bad; } } else { timer_setup(&dc->delay_timer, handle_delayed_timer, 0); INIT_WORK(&dc->flush_expired_bios, flush_expired_bios); - mutex_init(&dc->timer_lock); dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0); if (!dc->kdelayd_wq) { ret = -EINVAL; @@ -345,11 +354,11 @@ static void delay_presuspend(struct dm_target *ti) dc->may_delay = false; mutex_unlock(&delayed_bios_lock); - if (delay_is_fast(dc)) + if (delay_is_fast(dc)) { flush_delayed_bios_fast(dc, true); - else { + } else { del_timer_sync(&dc->delay_timer); - flush_bios(flush_delayed_bios(dc, true)); + flush_delayed_bios(dc, true); } } From ccadc8a21ef13eb80006ecff6a7466810e4f0dd6 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 17 Nov 2023 18:24:04 +0100 Subject: [PATCH 225/299] dm-delay: avoid duplicate logic This is small refactoring of dm-delay - we avoid duplicate logic in flush_delayed_bios and flush_delayed_bios_fast and join these two functions into one. We also add cond_resched() to flush_delayed_bios because the list may have unbounded number of entries. 
Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer --- drivers/md/dm-delay.c | 65 ++++++++++++++----------------------------- 1 file changed, 21 insertions(+), 44 deletions(-) diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 00c09d9fffeb..5eabdb06c649 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -85,24 +85,40 @@ static void flush_bios(struct bio *bio) } } -static void flush_delayed_bios_fast(struct delay_c *dc, bool flush_all) +static void flush_delayed_bios(struct delay_c *dc, bool flush_all) { struct dm_delay_info *delayed, *next; struct bio_list flush_bio_list; + unsigned long next_expires = 0; + bool start_timer = false; bio_list_init(&flush_bio_list); mutex_lock(&delayed_bios_lock); list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) { + cond_resched(); if (flush_all || time_after_eq(jiffies, delayed->expires)) { struct bio *bio = dm_bio_from_per_bio_data(delayed, sizeof(struct dm_delay_info)); list_del(&delayed->list); bio_list_add(&flush_bio_list, bio); delayed->class->ops--; + continue; + } + + if (!delay_is_fast(dc)) { + if (!start_timer) { + start_timer = true; + next_expires = delayed->expires; + } else { + next_expires = min(next_expires, delayed->expires); + } } } mutex_unlock(&delayed_bios_lock); + if (start_timer) + queue_timeout(dc, next_expires); + flush_bios(bio_list_get(&flush_bio_list)); } @@ -111,7 +127,7 @@ static int flush_worker_fn(void *data) struct delay_c *dc = data; while (!kthread_should_stop()) { - flush_delayed_bios_fast(dc, false); + flush_delayed_bios(dc, false); mutex_lock(&delayed_bios_lock); if (unlikely(list_empty(&dc->delayed_bios))) { set_current_state(TASK_INTERRUPTIBLE); @@ -126,48 +142,12 @@ static int flush_worker_fn(void *data) return 0; } -static void flush_delayed_bios(struct delay_c *dc, bool flush_all) -{ - struct dm_delay_info *delayed, *next; - unsigned long next_expires = 0; - unsigned long start_timer = 0; - struct bio_list flush_bio_list; - bio_list_init(&flush_bio_list); - - mutex_lock(&delayed_bios_lock); - list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) { - if (flush_all || time_after_eq(jiffies, delayed->expires)) { - struct bio *bio = dm_bio_from_per_bio_data(delayed, - sizeof(struct dm_delay_info)); - list_del(&delayed->list); - bio_list_add(&flush_bio_list, bio); - delayed->class->ops--; - continue; - } - - if (!start_timer) { - start_timer = 1; - next_expires = delayed->expires; - } else - next_expires = min(next_expires, delayed->expires); - } - mutex_unlock(&delayed_bios_lock); - - if (start_timer) - queue_timeout(dc, next_expires); - - flush_bios(bio_list_get(&flush_bio_list)); -} - static void flush_expired_bios(struct work_struct *work) { struct delay_c *dc; dc = container_of(work, struct delay_c, flush_expired_bios); - if (delay_is_fast(dc)) - flush_delayed_bios_fast(dc, false); - else - flush_delayed_bios(dc, false); + flush_delayed_bios(dc, false); } static void delay_dtr(struct dm_target *ti) @@ -354,12 +334,9 @@ static void delay_presuspend(struct dm_target *ti) dc->may_delay = false; mutex_unlock(&delayed_bios_lock); - if (delay_is_fast(dc)) { - flush_delayed_bios_fast(dc, true); - } else { + if (!delay_is_fast(dc)) del_timer_sync(&dc->delay_timer); - flush_delayed_bios(dc, true); - } + flush_delayed_bios(dc, true); } static void delay_resume(struct dm_target *ti) From 2a695062a5a42aead8c539a344168d4806b3fda2 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 17 Nov 2023 18:36:34 +0100 Subject: [PATCH 226/299] dm-bufio: fix no-sleep mode 
dm-bufio has a no-sleep mode. When activated (with the DM_BUFIO_CLIENT_NO_SLEEP flag), the bufio client is read-only and we could call dm_bufio_get from tasklets. This is used by dm-verity. Unfortunately, commit 450e8dee51aa ("dm bufio: improve concurrent IO performance") broke this and the kernel would warn that cache_get() was calling down_read() from no-sleeping context. The bug can be reproduced by using "veritysetup open" with the "--use-tasklets" flag. This commit fixes dm-bufio, so that the tasklet mode works again, by expanding use of the 'no_sleep_enabled' static_key to conditionally use either a rw_semaphore or rwlock_t (which are colocated in the buffer_tree structure using a union). Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org # v6.4 Fixes: 450e8dee51aa ("dm bufio: improve concurrent IO performance") Signed-off-by: Mike Snitzer --- drivers/md/dm-bufio.c | 87 ++++++++++++++++++++++++++++++------------- 1 file changed, 62 insertions(+), 25 deletions(-) diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 62eb27639c9b..f03d7dba270c 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -254,7 +254,7 @@ enum evict_result { typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context); -static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context) +static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep) { unsigned long tested = 0; struct list_head *h = lru->cursor; @@ -295,7 +295,8 @@ static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *con h = h->next; - cond_resched(); + if (!no_sleep) + cond_resched(); } return NULL; @@ -382,7 +383,10 @@ struct dm_buffer { */ struct buffer_tree { - struct rw_semaphore lock; + union { + struct rw_semaphore lock; + rwlock_t spinlock; + } u; struct rb_root root; } ____cacheline_aligned_in_smp; @@ -393,9 +397,12 @@ struct dm_buffer_cache { * on the locks. 
*/ unsigned int num_locks; + bool no_sleep; struct buffer_tree trees[]; }; +static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled); + static inline unsigned int cache_index(sector_t block, unsigned int num_locks) { return dm_hash_locks_index(block, num_locks); @@ -403,22 +410,34 @@ static inline unsigned int cache_index(sector_t block, unsigned int num_locks) static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block) { - down_read(&bc->trees[cache_index(block, bc->num_locks)].lock); + if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep) + read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock); + else + down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock); } static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block) { - up_read(&bc->trees[cache_index(block, bc->num_locks)].lock); + if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep) + read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock); + else + up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock); } static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block) { - down_write(&bc->trees[cache_index(block, bc->num_locks)].lock); + if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep) + write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock); + else + down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock); } static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block) { - up_write(&bc->trees[cache_index(block, bc->num_locks)].lock); + if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep) + write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock); + else + up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock); } /* @@ -442,18 +461,32 @@ static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool static void __lh_lock(struct lock_history *lh, unsigned int index) { - if (lh->write) - down_write(&lh->cache->trees[index].lock); - else - down_read(&lh->cache->trees[index].lock); + if (lh->write) { + if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep) + write_lock_bh(&lh->cache->trees[index].u.spinlock); + else + down_write(&lh->cache->trees[index].u.lock); + } else { + if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep) + read_lock_bh(&lh->cache->trees[index].u.spinlock); + else + down_read(&lh->cache->trees[index].u.lock); + } } static void __lh_unlock(struct lock_history *lh, unsigned int index) { - if (lh->write) - up_write(&lh->cache->trees[index].lock); - else - up_read(&lh->cache->trees[index].lock); + if (lh->write) { + if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep) + write_unlock_bh(&lh->cache->trees[index].u.spinlock); + else + up_write(&lh->cache->trees[index].u.lock); + } else { + if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep) + read_unlock_bh(&lh->cache->trees[index].u.spinlock); + else + up_read(&lh->cache->trees[index].u.lock); + } } /* @@ -502,14 +535,18 @@ static struct dm_buffer *list_to_buffer(struct list_head *l) return le_to_buffer(le); } -static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks) +static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep) { unsigned int i; bc->num_locks = num_locks; + bc->no_sleep = no_sleep; for (i = 0; i < bc->num_locks; i++) { - init_rwsem(&bc->trees[i].lock); + if (no_sleep) + rwlock_init(&bc->trees[i].u.spinlock); + else + 
init_rwsem(&bc->trees[i].u.lock); bc->trees[i].root = RB_ROOT; } @@ -648,7 +685,7 @@ static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode struct lru_entry *le; struct dm_buffer *b; - le = lru_evict(&bc->lru[list_mode], __evict_pred, &w); + le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep); if (!le) return NULL; @@ -702,7 +739,7 @@ static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_ struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context}; while (true) { - le = lru_evict(&bc->lru[old_mode], __evict_pred, &w); + le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep); if (!le) break; @@ -915,10 +952,11 @@ static void cache_remove_range(struct dm_buffer_cache *bc, { unsigned int i; + BUG_ON(bc->no_sleep); for (i = 0; i < bc->num_locks; i++) { - down_write(&bc->trees[i].lock); + down_write(&bc->trees[i].u.lock); __remove_range(bc, &bc->trees[i].root, begin, end, pred, release); - up_write(&bc->trees[i].lock); + up_write(&bc->trees[i].u.lock); } } @@ -979,8 +1017,6 @@ struct dm_bufio_client { struct dm_buffer_cache cache; /* must be last member */ }; -static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled); - /*----------------------------------------------------------------*/ #define dm_bufio_in_request() (!!current->bio_list) @@ -1871,7 +1907,8 @@ static void *new_read(struct dm_bufio_client *c, sector_t block, if (need_submit) submit_io(b, REQ_OP_READ, read_endio); - wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); + if (nf != NF_GET) /* we already tested this condition above */ + wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); if (b->read_error) { int error = blk_status_to_errno(b->read_error); @@ -2421,7 +2458,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign r = -ENOMEM; goto bad_client; } - cache_init(&c->cache, num_locks); + cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0); c->bdev = bdev; c->block_size = block_size; From 28f07f2ab4b3a2714f1fefcc58ada4bcc195f806 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 17 Nov 2023 18:37:25 +0100 Subject: [PATCH 227/299] dm-verity: don't use blocking calls from tasklets The commit 5721d4e5a9cd enhanced dm-verity, so that it can verify blocks from tasklets rather than from workqueues. This reportedly improves performance significantly. However, dm-verity was using the flag CRYPTO_TFM_REQ_MAY_SLEEP from tasklets which resulted in warnings about sleeping function being called from non-sleeping context. BUG: sleeping function called from invalid context at crypto/internal.h:206 in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 14, name: ksoftirqd/0 preempt_count: 100, expected: 0 RCU nest depth: 0, expected: 0 CPU: 0 PID: 14 Comm: ksoftirqd/0 Tainted: G W 6.7.0-rc1 #1 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014 Call Trace: dump_stack_lvl+0x32/0x50 __might_resched+0x110/0x160 crypto_hash_walk_done+0x54/0xb0 shash_ahash_update+0x51/0x60 verity_hash_update.isra.0+0x4a/0x130 [dm_verity] verity_verify_io+0x165/0x550 [dm_verity] ? free_unref_page+0xdf/0x170 ? psi_group_change+0x113/0x390 verity_tasklet+0xd/0x70 [dm_verity] tasklet_action_common.isra.0+0xb3/0xc0 __do_softirq+0xaf/0x1ec ? smpboot_thread_fn+0x1d/0x200 ? sort_range+0x20/0x20 run_ksoftirqd+0x15/0x30 smpboot_thread_fn+0xed/0x200 kthread+0xdc/0x110 ? kthread_complete_and_exit+0x20/0x20 ret_from_fork+0x28/0x40 ? 
kthread_complete_and_exit+0x20/0x20 ret_from_fork_asm+0x11/0x20 This commit fixes dm-verity so that it doesn't use the flags CRYPTO_TFM_REQ_MAY_SLEEP and CRYPTO_TFM_REQ_MAY_BACKLOG from tasklets. The crypto API would do GFP_ATOMIC allocation instead, it could return -ENOMEM and we catch -ENOMEM in verity_tasklet and requeue the request to the workqueue. Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org # v6.0+ Fixes: 5721d4e5a9cd ("dm verity: Add optional "try_verify_in_tasklet" feature") Signed-off-by: Mike Snitzer --- drivers/md/dm-verity-fec.c | 4 ++-- drivers/md/dm-verity-target.c | 23 ++++++++++++----------- drivers/md/dm-verity.h | 2 +- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 3ef9f018da60..2099c755119e 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -185,7 +185,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io, { if (unlikely(verity_hash(v, verity_io_hash_req(v, io), data, 1 << v->data_dev_block_bits, - verity_io_real_digest(v, io)))) + verity_io_real_digest(v, io), true))) return 0; return memcmp(verity_io_real_digest(v, io), want_digest, @@ -386,7 +386,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io, /* Always re-validate the corrected block against the expected hash */ r = verity_hash(v, verity_io_hash_req(v, io), fio->output, 1 << v->data_dev_block_bits, - verity_io_real_digest(v, io)); + verity_io_real_digest(v, io), true); if (unlikely(r < 0)) return r; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 26adcfea0302..e115fcfe723c 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -135,20 +135,21 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req, * Wrapper for crypto_ahash_init, which handles verity salting. */ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, - struct crypto_wait *wait) + struct crypto_wait *wait, bool may_sleep) { int r; ahash_request_set_tfm(req, v->tfm); - ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | - CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, (void *)wait); + ahash_request_set_callback(req, + may_sleep ? 
CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0, + crypto_req_done, (void *)wait); crypto_init_wait(wait); r = crypto_wait_req(crypto_ahash_init(req), wait); if (unlikely(r < 0)) { - DMERR("crypto_ahash_init failed: %d", r); + if (r != -ENOMEM) + DMERR("crypto_ahash_init failed: %d", r); return r; } @@ -179,12 +180,12 @@ out: } int verity_hash(struct dm_verity *v, struct ahash_request *req, - const u8 *data, size_t len, u8 *digest) + const u8 *data, size_t len, u8 *digest, bool may_sleep) { int r; struct crypto_wait wait; - r = verity_hash_init(v, req, &wait); + r = verity_hash_init(v, req, &wait, may_sleep); if (unlikely(r < 0)) goto out; @@ -322,7 +323,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io, r = verity_hash(v, verity_io_hash_req(v, io), data, 1 << v->hash_dev_block_bits, - verity_io_real_digest(v, io)); + verity_io_real_digest(v, io), !io->in_tasklet); if (unlikely(r < 0)) goto release_ret_r; @@ -556,7 +557,7 @@ static int verity_verify_io(struct dm_verity_io *io) continue; } - r = verity_hash_init(v, req, &wait); + r = verity_hash_init(v, req, &wait, !io->in_tasklet); if (unlikely(r < 0)) return r; @@ -652,7 +653,7 @@ static void verity_tasklet(unsigned long data) io->in_tasklet = true; err = verity_verify_io(io); - if (err == -EAGAIN) { + if (err == -EAGAIN || err == -ENOMEM) { /* fallback to retrying with work-queue */ INIT_WORK(&io->work, verity_work); queue_work(io->v->verify_wq, &io->work); @@ -1033,7 +1034,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v) goto out; r = verity_hash(v, req, zero_data, 1 << v->data_dev_block_bits, - v->zero_digest); + v->zero_digest, true); out: kfree(req); diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index 2f555b420367..f96f4e281ee4 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -128,7 +128,7 @@ extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io, u8 *data, size_t len)); extern int verity_hash(struct dm_verity *v, struct ahash_request *req, - const u8 *data, size_t len, u8 *digest); + const u8 *data, size_t len, u8 *digest, bool may_sleep); extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, sector_t block, u8 *digest, bool *is_zero); From 13648e04a9b831b3dfa5cf3887dfa6cf8fe5fe69 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 17 Nov 2023 18:38:33 +0100 Subject: [PATCH 228/299] dm-crypt: start allocating with MAX_ORDER Commit 23baf831a32c ("mm, treewide: redefine MAX_ORDER sanely") changed the meaning of MAX_ORDER from exclusive to inclusive. So, we can allocate compound pages with up to 1 << MAX_ORDER pages. Reflect this change in dm-crypt and start trying to allocate compound pages with MAX_ORDER. 
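For context, an illustration of the redefinition (not the dm-crypt path
itself): with the inclusive meaning, MAX_ORDER is now itself a valid order
argument to the page allocator.

    /* Largest single compound allocation accepted after commit
     * 23baf831a32c; before it, order had to stay <= MAX_ORDER - 1. */
    struct page *page = alloc_pages(GFP_KERNEL | __GFP_COMP, MAX_ORDER);

    if (page)
            __free_pages(page, MAX_ORDER);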
Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer --- drivers/md/dm-crypt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 6de107aff331..2ae8560b6a14 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1673,7 +1673,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size) unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM; unsigned int remaining_size; - unsigned int order = MAX_ORDER - 1; + unsigned int order = MAX_ORDER; retry: if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) From bc1b5acb40201a0746d68a7d7cfc141899937f4f Mon Sep 17 00:00:00 2001 From: Mahmoud Adam Date: Fri, 10 Nov 2023 19:21:04 +0100 Subject: [PATCH 229/299] nfsd: fix file memleak on client_opens_release seq_release should be called to free the allocated seq_file Cc: stable@vger.kernel.org # v5.3+ Signed-off-by: Mahmoud Adam Reviewed-by: Jeff Layton Fixes: 78599c42ae3c ("nfsd4: add file to display list of client's opens") Reviewed-by: NeilBrown Tested-by: Jeff Layton Signed-off-by: Chuck Lever --- fs/nfsd/nfs4state.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 65fd5510323a..3709e58f0a4a 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2804,7 +2804,7 @@ static int client_opens_release(struct inode *inode, struct file *file) /* XXX: alternatively, we could get/drop in seq start/stop */ drop_client(clp); - return 0; + return seq_release(inode, file); } static const struct file_operations client_states_fops = { From 49cecd8628a9855cd993792a0377559ea32d5e7c Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 10 Nov 2023 11:28:39 -0500 Subject: [PATCH 230/299] NFSD: Update nfsd_cache_append() to use xdr_stream When inserting a DRC-cached response into the reply buffer, ensure that the reply buffer's xdr_stream is updated properly. Otherwise the server will send a garbage response. Cc: stable@vger.kernel.org # v6.3+ Reviewed-by: Jeff Layton Tested-by: Jeff Layton Signed-off-by: Chuck Lever --- fs/nfsd/nfscache.c | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 80621a709510..abb453be71ca 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c @@ -640,24 +640,17 @@ void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp, return; } -/* - * Copy cached reply to current reply buffer. Should always fit. - * FIXME as reply is in a page, we should just attach the page, and - * keep a refcount.... 
- */ static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data) { - struct kvec *vec = &rqstp->rq_res.head[0]; + __be32 *p; - if (vec->iov_len + data->iov_len > PAGE_SIZE) { - printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n", - data->iov_len); - return 0; - } - memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len); - vec->iov_len += data->iov_len; - return 1; + p = xdr_reserve_space(&rqstp->rq_res_stream, data->iov_len); + if (unlikely(!p)) + return false; + memcpy(p, data->iov_base, data->iov_len); + xdr_commit_encode(&rqstp->rq_res_stream); + return true; } /* From 1caf5f61dd8430ae5a0b4538afe4953ce7517cbb Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 10 Nov 2023 11:28:33 -0500 Subject: [PATCH 231/299] NFSD: Fix "start of NFS reply" pointer passed to nfsd_cache_update() The "statp + 1" pointer that is passed to nfsd_cache_update() is supposed to point to the start of the egress NFS Reply header. In fact, it does point there for AUTH_SYS and RPCSEC_GSS_KRB5 requests. But both krb5i and krb5p add fields between the RPC header's accept_stat field and the start of the NFS Reply header. In those cases, "statp + 1" points at the extra fields instead of the Reply. The result is that nfsd_cache_update() caches what looks to the client like garbage. A connection break can occur for a number of reasons, but the most common reason when using krb5i/p is a GSS sequence number window underrun. When an underrun is detected, the server is obliged to drop the RPC and the connection to force a retransmit with a fresh GSS sequence number. The client presents the same XID, it hits in the server's DRC, and the server returns the garbage cache entry. The "statp + 1" argument has been used since the oldest changeset in the kernel history repo, so it has been in nfsd_dispatch() literally since before history began. The problem arose only when the server-side GSS implementation was added twenty years ago. Reviewed-by: Jeff Layton Tested-by: Jeff Layton --- fs/nfsd/nfssvc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index d6122bb2d167..b4e4e04f9931 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -981,6 +981,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp) const struct svc_procedure *proc = rqstp->rq_procinfo; __be32 *statp = rqstp->rq_accept_statp; struct nfsd_cacherep *rp; + __be32 *nfs_reply; /* * Give the xdr decoder a chance to change this if it wants @@ -1010,6 +1011,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp) goto out_dropit; } + nfs_reply = xdr_inline_decode(&rqstp->rq_res_stream, 0); *statp = proc->pc_func(rqstp); if (test_bit(RQ_DROPME, &rqstp->rq_flags)) goto out_update_drop; @@ -1023,7 +1025,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp) */ smp_store_release(&rqstp->rq_status_counter, rqstp->rq_status_counter + 1); - nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp + 1); + nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, nfs_reply); out_cached_reply: return 1; From bf51c52a1f3c238d72c64e14d5e7702d3a245b82 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 10 Nov 2023 11:28:45 -0500 Subject: [PATCH 232/299] NFSD: Fix checksum mismatches in the duplicate reply cache nfsd_cache_csum() currently assumes that the server's RPC layer has been advancing rq_arg.head[0].iov_base as it decodes an incoming request, because that's the way it used to work. 
On entry, it expects that buf->head[0].iov_base points to the start of the NFS header, and excludes the already-decoded RPC header. These days however, head[0].iov_base now points to the start of the RPC header during all processing. It no longer points at the NFS Call header when execution arrives at nfsd_cache_csum(). In a retransmitted RPC the XID and the NFS header are supposed to be the same as the original message, but the contents of the retransmitted RPC header can be different. For example, for krb5, the GSS sequence number will be different between the two. Thus if the RPC header is always included in the DRC checksum computation, the checksum of the retransmitted message might not match the checksum of the original message, even though the NFS part of these messages is identical. The result is that, even if a matching XID is found in the DRC, the checksum mismatch causes the server to execute the retransmitted RPC transaction again. Reviewed-by: Jeff Layton Tested-by: Jeff Layton Signed-off-by: Chuck Lever --- fs/nfsd/cache.h | 4 +-- fs/nfsd/nfscache.c | 64 +++++++++++++++++++++++++++++++--------------- fs/nfsd/nfssvc.c | 10 +++++++- 3 files changed, 54 insertions(+), 24 deletions(-) diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h index 929248c6ca84..4cbe0434cbb8 100644 --- a/fs/nfsd/cache.h +++ b/fs/nfsd/cache.h @@ -84,8 +84,8 @@ int nfsd_net_reply_cache_init(struct nfsd_net *nn); void nfsd_net_reply_cache_destroy(struct nfsd_net *nn); int nfsd_reply_cache_init(struct nfsd_net *); void nfsd_reply_cache_shutdown(struct nfsd_net *); -int nfsd_cache_lookup(struct svc_rqst *rqstp, - struct nfsd_cacherep **cacherep); +int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start, + unsigned int len, struct nfsd_cacherep **cacherep); void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp, int cachetype, __be32 *statp); int nfsd_reply_cache_stats_show(struct seq_file *m, void *v); diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index abb453be71ca..6cd36af2f97e 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c @@ -368,33 +368,52 @@ nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc) return freed; } -/* - * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes +/** + * nfsd_cache_csum - Checksum incoming NFS Call arguments + * @buf: buffer containing a whole RPC Call message + * @start: starting byte of the NFS Call header + * @remaining: size of the NFS Call header, in bytes + * + * Compute a weak checksum of the leading bytes of an NFS procedure + * call header to help verify that a retransmitted Call matches an + * entry in the duplicate reply cache. + * + * To avoid assumptions about how the RPC message is laid out in + * @buf and what else it might contain (eg, a GSS MIC suffix), the + * caller passes us the exact location and length of the NFS Call + * header. + * + * Returns a 32-bit checksum value, as defined in RFC 793. 
*/ -static __wsum -nfsd_cache_csum(struct svc_rqst *rqstp) +static __wsum nfsd_cache_csum(struct xdr_buf *buf, unsigned int start, + unsigned int remaining) { + unsigned int base, len; + struct xdr_buf subbuf; + __wsum csum = 0; + void *p; int idx; - unsigned int base; - __wsum csum; - struct xdr_buf *buf = &rqstp->rq_arg; - const unsigned char *p = buf->head[0].iov_base; - size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len, - RC_CSUMLEN); - size_t len = min(buf->head[0].iov_len, csum_len); + + if (remaining > RC_CSUMLEN) + remaining = RC_CSUMLEN; + if (xdr_buf_subsegment(buf, &subbuf, start, remaining)) + return csum; /* rq_arg.head first */ - csum = csum_partial(p, len, 0); - csum_len -= len; + if (subbuf.head[0].iov_len) { + len = min_t(unsigned int, subbuf.head[0].iov_len, remaining); + csum = csum_partial(subbuf.head[0].iov_base, len, csum); + remaining -= len; + } /* Continue into page array */ - idx = buf->page_base / PAGE_SIZE; - base = buf->page_base & ~PAGE_MASK; - while (csum_len) { - p = page_address(buf->pages[idx]) + base; - len = min_t(size_t, PAGE_SIZE - base, csum_len); + idx = subbuf.page_base / PAGE_SIZE; + base = subbuf.page_base & ~PAGE_MASK; + while (remaining) { + p = page_address(subbuf.pages[idx]) + base; + len = min_t(unsigned int, PAGE_SIZE - base, remaining); csum = csum_partial(p, len, csum); - csum_len -= len; + remaining -= len; base = 0; ++idx; } @@ -465,6 +484,8 @@ out: /** * nfsd_cache_lookup - Find an entry in the duplicate reply cache * @rqstp: Incoming Call to find + * @start: starting byte in @rqstp->rq_arg of the NFS Call header + * @len: size of the NFS Call header, in bytes * @cacherep: OUT: DRC entry for this request * * Try to find an entry matching the current call in the cache. When none @@ -478,7 +499,8 @@ out: * %RC_REPLY: Reply from cache * %RC_DROPIT: Do not process the request further */ -int nfsd_cache_lookup(struct svc_rqst *rqstp, struct nfsd_cacherep **cacherep) +int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start, + unsigned int len, struct nfsd_cacherep **cacherep) { struct nfsd_net *nn; struct nfsd_cacherep *rp, *found; @@ -494,7 +516,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, struct nfsd_cacherep **cacherep) goto out; } - csum = nfsd_cache_csum(rqstp); + csum = nfsd_cache_csum(&rqstp->rq_arg, start, len); /* * Since the common case is a cache miss followed by an insert, diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index b4e4e04f9931..fe61d9bbcc1f 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -981,6 +981,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp) const struct svc_procedure *proc = rqstp->rq_procinfo; __be32 *statp = rqstp->rq_accept_statp; struct nfsd_cacherep *rp; + unsigned int start, len; __be32 *nfs_reply; /* @@ -989,6 +990,13 @@ int nfsd_dispatch(struct svc_rqst *rqstp) */ rqstp->rq_cachetype = proc->pc_cachetype; + /* + * ->pc_decode advances the argument stream past the NFS + * Call header, so grab the header's starting location and + * size now for the call to nfsd_cache_lookup(). 
+ */ + start = xdr_stream_pos(&rqstp->rq_arg_stream); + len = xdr_stream_remaining(&rqstp->rq_arg_stream); if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream)) goto out_decode_err; @@ -1002,7 +1010,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp) smp_store_release(&rqstp->rq_status_counter, rqstp->rq_status_counter | 1); rp = NULL; - switch (nfsd_cache_lookup(rqstp, &rp)) { + switch (nfsd_cache_lookup(rqstp, start, len, &rp)) { case RC_DOIT: break; case RC_REPLY: From 721d28f3dfb3e40c45ce45fbeeff47b72c230bc9 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 16 Nov 2023 11:13:40 -0800 Subject: [PATCH 233/299] parisc: Replace strlcpy() with strscpy() strlcpy() reads the entire source buffer first. This read may exceed the destination size limit. This is both inefficient and can lead to linear read overflows if a source string is not NUL-terminated[1]. Additionally, it returns the size of the source string, not the resulting size of the destination string. In an effort to remove strlcpy() completely[2], replace strlcpy() here with strscpy(). Link: https://www.kernel.org/doc/html/latest/process/deprecated.html#strlcpy [1] Link: https://github.com/KSPP/linux/issues/89 [2] Cc: "James E.J. Bottomley" Cc: Helge Deller Cc: Azeem Shaikh Cc: linux-parisc@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: Helge Deller --- arch/parisc/kernel/processor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index 29e2750f86a4..e95a977ba5f3 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -383,7 +383,7 @@ show_cpuinfo (struct seq_file *m, void *v) char cpu_name[60], *p; /* strip PA path from CPU name to not confuse lscpu */ - strlcpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name)); + strscpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name)); p = strrchr(cpu_name, '['); if (p) *(--p) = 0; From 6ad6e15a9c46b8f0932cd99724f26f3db4db1cdf Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Fri, 17 Nov 2023 16:43:52 +0100 Subject: [PATCH 234/299] parisc/power: Fix power soft-off when running on qemu Firmware returns the physical address of the power switch, so need to use gsc_writel() instead of direct memory access. Fixes: d0c219472980 ("parisc/power: Add power soft-off when running on qemu") Signed-off-by: Helge Deller Cc: stable@vger.kernel.org # v6.0+ --- drivers/parisc/power.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c index 539d8920c202..bb0d92461b08 100644 --- a/drivers/parisc/power.c +++ b/drivers/parisc/power.c @@ -176,7 +176,7 @@ static struct notifier_block parisc_panic_block = { static int qemu_power_off(struct sys_off_data *data) { /* this turns the system off via SeaBIOS */ - *(int *)data->cb_data = 0; + gsc_writel(0, (unsigned long) data->cb_data); pdc_soft_power_button(1); return NOTIFY_DONE; } From 793838138c157d4c49f4fb744b170747e3dabf58 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sat, 18 Nov 2023 19:33:35 +0100 Subject: [PATCH 235/299] prctl: Disable prctl(PR_SET_MDWE) on parisc systemd-254 tries to use prctl(PR_SET_MDWE) for it's MemoryDenyWriteExecute functionality, but fails on parisc which still needs executable stacks in certain combinations of gcc/glibc/kernel. Disable prctl(PR_SET_MDWE) by returning -EINVAL for now on parisc, until userspace has catched up. 
Signed-off-by: Helge Deller Co-developed-by: Linus Torvalds Reported-by: Sam James Closes: https://github.com/systemd/systemd/issues/29775 Tested-by: Sam James Link: https://lore.kernel.org/all/875y2jro9a.fsf@gentoo.org/ Cc: # v6.3+ --- kernel/sys.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/sys.c b/kernel/sys.c index 420d9cb9cc8e..e219fcfa112d 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -2394,6 +2394,10 @@ static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3, if (bits & PR_MDWE_NO_INHERIT && !(bits & PR_MDWE_REFUSE_EXEC_GAIN)) return -EINVAL; + /* PARISC cannot allow mdwe as it needs writable stacks */ + if (IS_ENABLED(CONFIG_PARISC)) + return -EINVAL; + current_bits = get_current_mdwe(); if (current_bits && current_bits != bits) return -EPERM; /* Cannot unset the flags */ From 8ba2c459668cfe2aaacc5ebcd35b4b9ef8643013 Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Fri, 17 Nov 2023 18:11:08 +0800 Subject: [PATCH 236/299] net: wangxun: fix kernel panic due to null pointer When the device uses a custom subsystem vendor ID, the function wx_sw_init() returns before the memory of 'wx->mac_table' is allocated. The null pointer will causes the kernel panic. Fixes: 79625f45ca73 ("net: wangxun: Move MAC address handling to libwx") Signed-off-by: Jiawen Wu Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/wangxun/libwx/wx_hw.c | 8 +++++--- drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 4 +--- drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 4 +--- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index a3c5de9d547a..533e912af089 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1769,10 +1769,12 @@ int wx_sw_init(struct wx *wx) wx->subsystem_device_id = pdev->subsystem_device; } else { err = wx_flash_read_dword(wx, 0xfffdc, &ssid); - if (!err) - wx->subsystem_device_id = swab16((u16)ssid); + if (err < 0) { + wx_err(wx, "read of internal subsystem device id failed\n"); + return err; + } - return err; + wx->subsystem_device_id = swab16((u16)ssid); } wx->mac_table = kcalloc(wx->mac.num_rar_entries, diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index 3d43f808c86b..8db804543e66 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -121,10 +121,8 @@ static int ngbe_sw_init(struct wx *wx) /* PCI config space info */ err = wx_sw_init(wx); - if (err < 0) { - wx_err(wx, "read of internal subsystem device id failed\n"); + if (err < 0) return err; - } /* mac type, phy type , oem type */ ngbe_init_type_code(wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 70f0b5c01dac..526250102db2 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -364,10 +364,8 @@ static int txgbe_sw_init(struct wx *wx) /* PCI config space info */ err = wx_sw_init(wx); - if (err < 0) { - wx_err(wx, "read of internal subsystem device id failed\n"); + if (err < 0) return err; - } txgbe_init_type_code(wx); From 93da8d75a66568ba4bb5b14ad2833acd7304cd02 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 17 Nov 2023 14:17:33 +0000 Subject: [PATCH 237/299] wireguard: use DEV_STATS_INC() wg_xmit() can be called concurrently, KCSAN reported [1] some device stats 
updates can be lost. Use DEV_STATS_INC() for this unlikely case. [1] BUG: KCSAN: data-race in wg_xmit / wg_xmit read-write to 0xffff888104239160 of 8 bytes by task 1375 on cpu 0: wg_xmit+0x60f/0x680 drivers/net/wireguard/device.c:231 __netdev_start_xmit include/linux/netdevice.h:4918 [inline] netdev_start_xmit include/linux/netdevice.h:4932 [inline] xmit_one net/core/dev.c:3543 [inline] dev_hard_start_xmit+0x11b/0x3f0 net/core/dev.c:3559 ... read-write to 0xffff888104239160 of 8 bytes by task 1378 on cpu 1: wg_xmit+0x60f/0x680 drivers/net/wireguard/device.c:231 __netdev_start_xmit include/linux/netdevice.h:4918 [inline] netdev_start_xmit include/linux/netdevice.h:4932 [inline] xmit_one net/core/dev.c:3543 [inline] dev_hard_start_xmit+0x11b/0x3f0 net/core/dev.c:3559 ... v2: also change wg_packet_consume_data_done() (Hangbin Liu) and wg_packet_purge_staged_packets() Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") Reported-by: syzbot Signed-off-by: Eric Dumazet Cc: Jason A. Donenfeld Cc: Hangbin Liu Signed-off-by: Jason A. Donenfeld Reviewed-by: Hangbin Liu Signed-off-by: David S. Miller --- drivers/net/wireguard/device.c | 4 ++-- drivers/net/wireguard/receive.c | 12 ++++++------ drivers/net/wireguard/send.c | 3 ++- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index 258dcc103921..deb9636b0ecf 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -210,7 +210,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) */ while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) { dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue)); - ++dev->stats.tx_dropped; + DEV_STATS_INC(dev, tx_dropped); } skb_queue_splice_tail(&packets, &peer->staged_packet_queue); spin_unlock_bh(&peer->staged_packet_queue.lock); @@ -228,7 +228,7 @@ err_icmp: else if (skb->protocol == htons(ETH_P_IPV6)) icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); err: - ++dev->stats.tx_errors; + DEV_STATS_INC(dev, tx_errors); kfree_skb(skb); return ret; } diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index 0b3f0c843550..a176653c8861 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -416,20 +416,20 @@ dishonest_packet_peer: net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n", dev->name, skb, peer->internal_id, &peer->endpoint.addr); - ++dev->stats.rx_errors; - ++dev->stats.rx_frame_errors; + DEV_STATS_INC(dev, rx_errors); + DEV_STATS_INC(dev, rx_frame_errors); goto packet_processed; dishonest_packet_type: net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n", dev->name, peer->internal_id, &peer->endpoint.addr); - ++dev->stats.rx_errors; - ++dev->stats.rx_frame_errors; + DEV_STATS_INC(dev, rx_errors); + DEV_STATS_INC(dev, rx_frame_errors); goto packet_processed; dishonest_packet_size: net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n", dev->name, peer->internal_id, &peer->endpoint.addr); - ++dev->stats.rx_errors; - ++dev->stats.rx_length_errors; + DEV_STATS_INC(dev, rx_errors); + DEV_STATS_INC(dev, rx_length_errors); goto packet_processed; packet_processed: dev_kfree_skb(skb); diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c index 95c853b59e1d..0d48e0f4a1ba 100644 --- a/drivers/net/wireguard/send.c +++ b/drivers/net/wireguard/send.c @@ -333,7 +333,8 @@ err: void 
wg_packet_purge_staged_packets(struct wg_peer *peer) { spin_lock_bh(&peer->staged_packet_queue.lock); - peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen; + DEV_STATS_ADD(peer->device->dev, tx_dropped, + peer->staged_packet_queue.qlen); __skb_queue_purge(&peer->staged_packet_queue); spin_unlock_bh(&peer->staged_packet_queue.lock); } From 5f228d7c8a539714c1e9b7e7534f76bb7979f268 Mon Sep 17 00:00:00 2001 From: Suman Ghosh Date: Fri, 17 Nov 2023 16:10:18 +0530 Subject: [PATCH 238/299] octeontx2-pf: Fix memory leak during interface down During 'ifconfig down' one RSS memory was not getting freed. This patch fixes the same. Fixes: 81a4362016e7 ("octeontx2-pf: Add RSS multi group support") Signed-off-by: Suman Ghosh Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 91b99fd70361..ba95ac913274 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1934,6 +1934,8 @@ int otx2_stop(struct net_device *netdev) /* Clear RSS enable flag */ rss = &pf->hw.rss_info; rss->enable = false; + if (!netif_is_rxfh_configured(netdev)) + kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]); /* Cleanup Queue IRQ */ vec = pci_irq_vector(pf->pdev, From 938dbead34cd139c50c5d51cc58e18ef2f1c3d2c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sat, 18 Nov 2023 19:30:06 -0800 Subject: [PATCH 239/299] net: fill in MODULE_DESCRIPTION()s for SOCK_DIAG modules W=1 builds now warn if module is built without a MODULE_DESCRIPTION(). Add descriptions to all the sock diag modules in one fell swoop. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- net/ipv4/inet_diag.c | 1 + net/ipv4/raw_diag.c | 1 + net/ipv4/tcp_diag.c | 1 + net/ipv4/udp_diag.c | 1 + net/mptcp/mptcp_diag.c | 1 + net/packet/diag.c | 1 + net/sctp/diag.c | 1 + net/smc/smc_diag.c | 1 + net/tipc/diag.c | 1 + net/unix/diag.c | 1 + net/vmw_vsock/diag.c | 1 + net/xdp/xsk_diag.c | 1 + 12 files changed, 12 insertions(+) diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index f01aee832aab..7d0e7aaa71e0 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -1481,5 +1481,6 @@ static void __exit inet_diag_exit(void) module_init(inet_diag_init); module_exit(inet_diag_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("INET/INET6: socket monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */); diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c index 63a40e4b678f..fe2140c8375c 100644 --- a/net/ipv4/raw_diag.c +++ b/net/ipv4/raw_diag.c @@ -257,5 +257,6 @@ static void __exit raw_diag_exit(void) module_init(raw_diag_init); module_exit(raw_diag_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RAW socket monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-255 /* AF_INET - IPPROTO_RAW */); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10-255 /* AF_INET6 - IPPROTO_RAW */); diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index 01b50fa79189..4cbe4b44425a 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c @@ -247,4 +247,5 @@ static void __exit tcp_diag_exit(void) module_init(tcp_diag_init); module_exit(tcp_diag_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TCP socket monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-6 /* AF_INET - IPPROTO_TCP */); diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index de3f2d31f510..dc41a22ee80e 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@ -296,5 +296,6 @@ static void __exit udp_diag_exit(void) module_init(udp_diag_init); module_exit(udp_diag_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("UDP socket monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */); diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c index 8df1bdb647e2..5409c2ea3f57 100644 --- a/net/mptcp/mptcp_diag.c +++ b/net/mptcp/mptcp_diag.c @@ -245,4 +245,5 @@ static void __exit mptcp_diag_exit(void) module_init(mptcp_diag_init); module_exit(mptcp_diag_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MPTCP socket monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-262 /* AF_INET - IPPROTO_MPTCP */); diff --git a/net/packet/diag.c b/net/packet/diag.c index f6b200cb3c06..9a7980e3309d 100644 --- a/net/packet/diag.c +++ b/net/packet/diag.c @@ -262,4 +262,5 @@ static void __exit packet_diag_exit(void) module_init(packet_diag_init); module_exit(packet_diag_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("PACKET socket monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */); diff --git a/net/sctp/diag.c b/net/sctp/diag.c index c3d6b92dd386..eb05131ff1dd 100644 --- a/net/sctp/diag.c +++ b/net/sctp/diag.c @@ -527,4 +527,5 @@ static void __exit sctp_diag_exit(void) module_init(sctp_diag_init); module_exit(sctp_diag_exit); 
MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SCTP socket monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-132); diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index 7ff2152971a5..a584613aca12 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -268,5 +268,6 @@ static void __exit smc_diag_exit(void) module_init(smc_diag_init); module_exit(smc_diag_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SMC socket monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 43 /* AF_SMC */); MODULE_ALIAS_GENL_FAMILY(SMCR_GENL_FAMILY_NAME); diff --git a/net/tipc/diag.c b/net/tipc/diag.c index 73137f4aeb68..18733451c9e0 100644 --- a/net/tipc/diag.c +++ b/net/tipc/diag.c @@ -113,4 +113,5 @@ module_init(tipc_diag_init); module_exit(tipc_diag_exit); MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("TIPC socket monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_TIPC); diff --git a/net/unix/diag.c b/net/unix/diag.c index 616b55c5b890..bec09a3a1d44 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -339,4 +339,5 @@ static void __exit unix_diag_exit(void) module_init(unix_diag_init); module_exit(unix_diag_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("UNIX socket monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */); diff --git a/net/vmw_vsock/diag.c b/net/vmw_vsock/diag.c index a2823b1c5e28..2e29994f92ff 100644 --- a/net/vmw_vsock/diag.c +++ b/net/vmw_vsock/diag.c @@ -174,5 +174,6 @@ static void __exit vsock_diag_exit(void) module_init(vsock_diag_init); module_exit(vsock_diag_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("VMware Virtual Sockets monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 40 /* AF_VSOCK */); diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c index 22b36c8143cf..9f8955367275 100644 --- a/net/xdp/xsk_diag.c +++ b/net/xdp/xsk_diag.c @@ -211,4 +211,5 @@ static void __exit xsk_diag_exit(void) module_init(xsk_diag_init); module_exit(xsk_diag_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("XDP socket monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP); From 98b1cc82c4affc16f5598d4fa14b1858671b2263 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 19 Nov 2023 15:02:14 -0800 Subject: [PATCH 240/299] Linux 6.7-rc2 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ede0bd241056..724c79bebe72 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 7 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Hurr durr I'ma ninja sloth # *DOCUMENTATION* From 79e0c5be8c73a674c92bd4ba77b75f4f8c91d32e Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 14 Nov 2023 01:42:13 +0100 Subject: [PATCH 241/299] net, vrf: Move dstats structure to core Just move struct pcpu_dstats out of the vrf into the core, and streamline the field names slightly, so they better align with the {t,l}stats ones. No functional change otherwise. A conversion of the u64s to u64_stats_t could be done at a separate point in future. This move is needed as we are moving the {t,l,d}stats allocation/freeing to the core. 
Signed-off-by: Daniel Borkmann Reviewed-by: Nikolay Aleksandrov Cc: Jakub Kicinski Cc: David Ahern Link: https://lore.kernel.org/r/20231114004220.6495-2-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- drivers/net/vrf.c | 24 +++++++----------------- include/linux/netdevice.h | 10 ++++++++++ 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index db766941b78f..3e6e0fdc3ba7 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -121,22 +121,12 @@ struct net_vrf { int ifindex; }; -struct pcpu_dstats { - u64 tx_pkts; - u64 tx_bytes; - u64 tx_drps; - u64 rx_pkts; - u64 rx_bytes; - u64 rx_drps; - struct u64_stats_sync syncp; -}; - static void vrf_rx_stats(struct net_device *dev, int len) { struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); u64_stats_update_begin(&dstats->syncp); - dstats->rx_pkts++; + dstats->rx_packets++; dstats->rx_bytes += len; u64_stats_update_end(&dstats->syncp); } @@ -161,10 +151,10 @@ static void vrf_get_stats64(struct net_device *dev, do { start = u64_stats_fetch_begin(&dstats->syncp); tbytes = dstats->tx_bytes; - tpkts = dstats->tx_pkts; - tdrops = dstats->tx_drps; + tpkts = dstats->tx_packets; + tdrops = dstats->tx_drops; rbytes = dstats->rx_bytes; - rpkts = dstats->rx_pkts; + rpkts = dstats->rx_packets; } while (u64_stats_fetch_retry(&dstats->syncp, start)); stats->tx_bytes += tbytes; stats->tx_packets += tpkts; @@ -421,7 +411,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) vrf_rx_stats(dev, len); else - this_cpu_inc(dev->dstats->rx_drps); + this_cpu_inc(dev->dstats->rx_drops); return NETDEV_TX_OK; } @@ -616,11 +606,11 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); u64_stats_update_begin(&dstats->syncp); - dstats->tx_pkts++; + dstats->tx_packets++; dstats->tx_bytes += len; u64_stats_update_end(&dstats->syncp); } else { - this_cpu_inc(dev->dstats->tx_drps); + this_cpu_inc(dev->dstats->tx_drops); } return ret; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a16c9cc063fe..98082113156e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2755,6 +2755,16 @@ struct pcpu_sw_netstats { struct u64_stats_sync syncp; } __aligned(4 * sizeof(u64)); +struct pcpu_dstats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_drops; + u64 tx_packets; + u64 tx_bytes; + u64 tx_drops; + struct u64_stats_sync syncp; +} __aligned(8 * sizeof(u64)); + struct pcpu_lstats { u64_stats_t packets; u64_stats_t bytes; From 34d21de99cea9cb17967874313e5b0262527833c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 14 Nov 2023 01:42:14 +0100 Subject: [PATCH 242/299] net: Move {l,t,d}stats allocation to core and convert veth & vrf Move {l,t,d}stats allocation to the core and let netdevs pick the stats type they need. That way the driver doesn't have to bother with error handling (allocation failure checking, making sure free happens in the right spot, etc) - all happening in the core. 
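
As a rough sketch of what this buys a driver after the conversion (the
device name below is made up for illustration): the setup callback only
declares which stats flavor it wants, and the core takes care of the
allocation on register and the freeing on unregister.

  static void foo_setup(struct net_device *dev)
  {
          ether_setup(dev);
          /* core allocates dev->tstats in register_netdevice() and
           * frees it on unregister; no ndo_init/ndo_uninit or
           * priv_destructor stats boilerplate needed in the driver */
          dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
  }
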
Co-developed-by: Jakub Kicinski Signed-off-by: Jakub Kicinski Signed-off-by: Daniel Borkmann Reviewed-by: Nikolay Aleksandrov Cc: David Ahern Link: https://lore.kernel.org/r/20231114004220.6495-3-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- drivers/net/veth.c | 16 ++----------- drivers/net/vrf.c | 14 +++-------- include/linux/netdevice.h | 20 ++++++++++++---- net/core/dev.c | 49 ++++++++++++++++++++++++++++++++++++++- 4 files changed, 69 insertions(+), 30 deletions(-) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 9980517ed8b0..ac030c241d1a 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1506,25 +1506,12 @@ static void veth_free_queues(struct net_device *dev) static int veth_dev_init(struct net_device *dev) { - int err; - - dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats); - if (!dev->lstats) - return -ENOMEM; - - err = veth_alloc_queues(dev); - if (err) { - free_percpu(dev->lstats); - return err; - } - - return 0; + return veth_alloc_queues(dev); } static void veth_dev_free(struct net_device *dev) { veth_free_queues(dev); - free_percpu(dev->lstats); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1796,6 +1783,7 @@ static void veth_setup(struct net_device *dev) NETIF_F_HW_VLAN_STAG_RX); dev->needs_free_netdev = true; dev->priv_destructor = veth_dev_free; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS; dev->max_mtu = ETH_MAX_MTU; dev->hw_features = VETH_FEATURES; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 3e6e0fdc3ba7..bb95ce43cd97 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1164,22 +1164,15 @@ static void vrf_dev_uninit(struct net_device *dev) vrf_rtable_release(dev, vrf); vrf_rt6_release(dev, vrf); - - free_percpu(dev->dstats); - dev->dstats = NULL; } static int vrf_dev_init(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); - dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); - if (!dev->dstats) - goto out_nomem; - /* create the default dst which points back to us */ if (vrf_rtable_create(dev) != 0) - goto out_stats; + goto out_nomem; if (vrf_rt6_create(dev) != 0) goto out_rth; @@ -1193,9 +1186,6 @@ static int vrf_dev_init(struct net_device *dev) out_rth: vrf_rtable_release(dev, vrf); -out_stats: - free_percpu(dev->dstats); - dev->dstats = NULL; out_nomem: return -ENOMEM; } @@ -1694,6 +1684,8 @@ static void vrf_setup(struct net_device *dev) dev->min_mtu = IPV6_MIN_MTU; dev->max_mtu = IP6_MAX_MTU; dev->mtu = dev->max_mtu; + + dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS; } static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 98082113156e..2564e209465e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1797,6 +1797,13 @@ enum netdev_ml_priv_type { ML_PRIV_CAN, }; +enum netdev_stat_type { + NETDEV_PCPU_STAT_NONE, + NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */ + NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */ + NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */ +}; + /** * struct net_device - The DEVICE structure. * @@ -1991,10 +1998,14 @@ enum netdev_ml_priv_type { * * @ml_priv: Mid-layer private * @ml_priv_type: Mid-layer private type - * @lstats: Loopback statistics - * @tstats: Tunnel statistics - * @dstats: Dummy statistics - * @vstats: Virtual ethernet statistics + * + * @pcpu_stat_type: Type of device statistics which the core should + * allocate/free: none, lstats, tstats, dstats. none + * means the driver is handling statistics allocation/ + * freeing internally. 
+ * @lstats: Loopback statistics: packets, bytes + * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes + * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes * * @garp_port: GARP * @mrp_port: MRP @@ -2354,6 +2365,7 @@ struct net_device { void *ml_priv; enum netdev_ml_priv_type ml_priv_type; + enum netdev_stat_type pcpu_stat_type:8; union { struct pcpu_lstats __percpu *lstats; struct pcpu_sw_netstats __percpu *tstats; diff --git a/net/core/dev.c b/net/core/dev.c index af53f6d838ce..0cc6e283edba 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -10051,6 +10051,46 @@ void netif_tx_stop_all_queues(struct net_device *dev) } EXPORT_SYMBOL(netif_tx_stop_all_queues); +static int netdev_do_alloc_pcpu_stats(struct net_device *dev) +{ + void __percpu *v; + + switch (dev->pcpu_stat_type) { + case NETDEV_PCPU_STAT_NONE: + return 0; + case NETDEV_PCPU_STAT_LSTATS: + v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats); + break; + case NETDEV_PCPU_STAT_TSTATS: + v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + break; + case NETDEV_PCPU_STAT_DSTATS: + v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); + break; + default: + return -EINVAL; + } + + return v ? 0 : -ENOMEM; +} + +static void netdev_do_free_pcpu_stats(struct net_device *dev) +{ + switch (dev->pcpu_stat_type) { + case NETDEV_PCPU_STAT_NONE: + return; + case NETDEV_PCPU_STAT_LSTATS: + free_percpu(dev->lstats); + break; + case NETDEV_PCPU_STAT_TSTATS: + free_percpu(dev->tstats); + break; + case NETDEV_PCPU_STAT_DSTATS: + free_percpu(dev->dstats); + break; + } +} + /** * register_netdevice() - register a network device * @dev: device to register @@ -10111,9 +10151,13 @@ int register_netdevice(struct net_device *dev) goto err_uninit; } + ret = netdev_do_alloc_pcpu_stats(dev); + if (ret) + goto err_uninit; + ret = dev_index_reserve(net, dev->ifindex); if (ret < 0) - goto err_uninit; + goto err_free_pcpu; dev->ifindex = ret; /* Transfer changeable features to wanted_features and enable @@ -10219,6 +10263,8 @@ err_uninit_notify: call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev); err_ifindex_release: dev_index_release(net, dev->ifindex); +err_free_pcpu: + netdev_do_free_pcpu_stats(dev); err_uninit: if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev); @@ -10471,6 +10517,7 @@ void netdev_run_todo(void) WARN_ON(rcu_access_pointer(dev->ip_ptr)); WARN_ON(rcu_access_pointer(dev->ip6_ptr)); + netdev_do_free_pcpu_stats(dev); if (dev->priv_destructor) dev->priv_destructor(dev); if (dev->needs_free_netdev) From ae1658272c6491a31ac968e39882fc569f312ac3 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 14 Nov 2023 01:42:15 +0100 Subject: [PATCH 243/299] netkit: Add tstats per-CPU traffic counters Add dev->tstats traffic accounting to netkit. The latter contains per-CPU RX and TX counters. The dev's TX counters are bumped upon pass/unspec as well as redirect verdicts, in other words, on everything except for drops. The dev's RX counters are bumped upon successful __netif_rx(), as well as from skb_do_redirect() (not part of this commit here). Using dev->lstats with having just a single packets/bytes counter and inferring one another's RX counters from the peer dev's lstats is not possible given skb_do_redirect() can also bump the device's stats. 
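
The accounting below relies on the generic tstats helpers; roughly,
dev_sw_netstats_rx_add() (and its TX counterpart) updates the current
CPU's dev->tstats like this (simplified sketch of the existing helper):

  struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

  u64_stats_update_begin(&tstats->syncp);
  u64_stats_add(&tstats->rx_bytes, len);
  u64_stats_inc(&tstats->rx_packets);
  u64_stats_update_end(&tstats->syncp);
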
Signed-off-by: Daniel Borkmann Acked-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20231114004220.6495-4-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- drivers/net/netkit.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c index 5a0f86f38f09..99de11f9cde5 100644 --- a/drivers/net/netkit.c +++ b/drivers/net/netkit.c @@ -68,6 +68,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_t ret_dev = NET_XMIT_SUCCESS; const struct bpf_mprog_entry *entry; struct net_device *peer; + int len = skb->len; rcu_read_lock(); peer = rcu_dereference(nk->peer); @@ -85,15 +86,22 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev) case NETKIT_PASS: skb->protocol = eth_type_trans(skb, skb->dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); - __netif_rx(skb); + if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) { + dev_sw_netstats_tx_add(dev, 1, len); + dev_sw_netstats_rx_add(peer, len); + } else { + goto drop_stats; + } break; case NETKIT_REDIRECT: + dev_sw_netstats_tx_add(dev, 1, len); skb_do_redirect(skb); break; case NETKIT_DROP: default: drop: kfree_skb(skb); +drop_stats: dev_core_stats_tx_dropped_inc(dev); ret_dev = NET_XMIT_DROP; break; @@ -174,6 +182,13 @@ static struct net_device *netkit_peer_dev(struct net_device *dev) return rcu_dereference(netkit_priv(dev)->peer); } +static void netkit_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + dev_fetch_sw_netstats(stats, dev->tstats); + stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped); +} + static void netkit_uninit(struct net_device *dev); static const struct net_device_ops netkit_netdev_ops = { @@ -184,6 +199,7 @@ static const struct net_device_ops netkit_netdev_ops = { .ndo_set_rx_headroom = netkit_set_headroom, .ndo_get_iflink = netkit_get_iflink, .ndo_get_peer_dev = netkit_peer_dev, + .ndo_get_stats64 = netkit_get_stats, .ndo_uninit = netkit_uninit, .ndo_features_check = passthru_features_check, }; @@ -218,6 +234,7 @@ static void netkit_setup(struct net_device *dev) ether_setup(dev); dev->max_mtu = ETH_MAX_MTU; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; dev->flags |= IFF_NOARP; dev->priv_flags &= ~IFF_TX_SKB_SHARING; From 6f2684bf2b4460c84d0d34612a939f78b96b03fc Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Tue, 14 Nov 2023 01:42:16 +0100 Subject: [PATCH 244/299] veth: Use tstats per-CPU traffic counters Currently veth devices use the lstats per-CPU traffic counters, which only cover TX traffic. veth_get_stats64() actually populates RX stats of a veth device from its peer's TX counters, based on the assumption that a veth device can _only_ receive packets from its peer, which is no longer true: For example, recent CNIs (like Cilium) can use the bpf_redirect_peer() BPF helper to redirect traffic from NIC's tc ingress to veth's tc ingress (in a different netns), skipping veth's peer device. Unfortunately, this kind of traffic isn't currently accounted for in veth's RX stats. In preparation for the fix, use tstats (instead of lstats) to maintain both RX and TX counters for each veth device. We'll use RX counters for bpf_redirect_peer() traffic, and keep using TX counters for the usual "peer-to-peer" traffic. In veth_get_stats64(), calculate RX stats by _adding_ RX count to peer's TX count, in order to cover both kinds of traffic. 
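
For context, the bpf_redirect_peer() path described above is driven from a
tc ingress program on the host-facing device; a minimal sketch (IFINDEX is
a placeholder the loader would fill with the target device's ifindex):

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  const volatile __u32 IFINDEX = 0;  /* filled in by the loader */

  SEC("tc")
  int redirect_to_peer_ns(struct __sk_buff *skb)
  {
          /* skb shows up on the tc ingress of the device in the other
           * netns without traversing the pair's xmit path, which is why
           * RX accounting derived only from the peer's TX counters
           * misses it */
          return bpf_redirect_peer(IFINDEX, 0);
  }

  char _license[] SEC("license") = "GPL";
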
veth_stats_rx() might need a name change (perhaps to "veth_stats_xdp()") for less confusion, but let's leave it to another patch to keep the fix minimal. Signed-off-by: Peilin Ye Co-developed-by: Daniel Borkmann Signed-off-by: Daniel Borkmann Reviewed-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20231114004220.6495-5-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- drivers/net/veth.c | 30 +++++++++++------------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index ac030c241d1a..6cc352296c67 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -373,7 +373,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) { if (!use_napi) - dev_lstats_add(dev, length); + dev_sw_netstats_tx_add(dev, 1, length); else __veth_xdp_flush(rq); } else { @@ -387,14 +387,6 @@ drop: return ret; } -static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes) -{ - struct veth_priv *priv = netdev_priv(dev); - - dev_lstats_read(dev, packets, bytes); - return atomic64_read(&priv->dropped); -} - static void veth_stats_rx(struct veth_stats *result, struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); @@ -432,24 +424,24 @@ static void veth_get_stats64(struct net_device *dev, struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; struct veth_stats rx; - u64 packets, bytes; - tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes); - tot->tx_bytes = bytes; - tot->tx_packets = packets; + tot->tx_dropped = atomic64_read(&priv->dropped); + dev_fetch_sw_netstats(tot, dev->tstats); veth_stats_rx(&rx, dev); tot->tx_dropped += rx.xdp_tx_err; tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err; - tot->rx_bytes = rx.xdp_bytes; - tot->rx_packets = rx.xdp_packets; + tot->rx_bytes += rx.xdp_bytes; + tot->rx_packets += rx.xdp_packets; rcu_read_lock(); peer = rcu_dereference(priv->peer); if (peer) { - veth_stats_tx(peer, &packets, &bytes); - tot->rx_bytes += bytes; - tot->rx_packets += packets; + struct rtnl_link_stats64 tot_peer = {}; + + dev_fetch_sw_netstats(&tot_peer, peer->tstats); + tot->rx_bytes += tot_peer.tx_bytes; + tot->rx_packets += tot_peer.tx_packets; veth_stats_rx(&rx, peer); tot->tx_dropped += rx.peer_tq_xdp_xmit_err; @@ -1783,7 +1775,7 @@ static void veth_setup(struct net_device *dev) NETIF_F_HW_VLAN_STAG_RX); dev->needs_free_netdev = true; dev->priv_destructor = veth_dev_free; - dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; dev->max_mtu = ETH_MAX_MTU; dev->hw_features = VETH_FEATURES; From 024ee930cb3c9ae49e4266aee89cfde0ebb407e1 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Tue, 14 Nov 2023 01:42:17 +0100 Subject: [PATCH 245/299] bpf: Fix dev's rx stats for bpf_redirect_peer traffic Traffic redirected by bpf_redirect_peer() (used by recent CNIs like Cilium) is not accounted for in the RX stats of supported devices (that is, veth and netkit), confusing user space metrics collectors such as cAdvisor [0], as reported by Youlun. Fix it by calling dev_sw_netstats_rx_add() in skb_do_redirect(), to update RX traffic counters. Devices that support ndo_get_peer_dev _must_ use the @tstats per-CPU counters (instead of @lstats, or @dstats). To make this more fool-proof, error out when ndo_get_peer_dev is set but @tstats are not selected. [0] Specifically, the "container_network_receive_{byte,packet}s_total" counters are affected. 
Fixes: 9aa1206e8f48 ("bpf: Add redirect_peer helper") Reported-by: Youlun Zhang Signed-off-by: Peilin Ye Co-developed-by: Daniel Borkmann Signed-off-by: Daniel Borkmann Reviewed-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20231114004220.6495-6-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- net/core/dev.c | 8 ++++++++ net/core/filter.c | 1 + 2 files changed, 9 insertions(+) diff --git a/net/core/dev.c b/net/core/dev.c index 0cc6e283edba..c879246be48d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -10055,6 +10055,14 @@ static int netdev_do_alloc_pcpu_stats(struct net_device *dev) { void __percpu *v; + /* Drivers implementing ndo_get_peer_dev must support tstat + * accounting, so that skb_do_redirect() can bump the dev's + * RX stats upon network namespace switch. + */ + if (dev->netdev_ops->ndo_get_peer_dev && + dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS) + return -EOPNOTSUPP; + switch (dev->pcpu_stat_type) { case NETDEV_PCPU_STAT_NONE: return 0; diff --git a/net/core/filter.c b/net/core/filter.c index 383f96b0a1c7..cca810987c8d 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2492,6 +2492,7 @@ int skb_do_redirect(struct sk_buff *skb) net_eq(net, dev_net(dev)))) goto out_drop; skb->dev = dev; + dev_sw_netstats_rx_add(dev, skb->len); return -EAGAIN; } return flags & BPF_F_NEIGH ? From 2c225425704078282e152ba692649237f78b3d7a Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 14 Nov 2023 01:42:18 +0100 Subject: [PATCH 246/299] bpf, netkit: Add indirect call wrapper for fetching peer dev ndo_get_peer_dev is used in tcx BPF fast path, therefore make use of indirect call wrapper and therefore optimize the bpf_redirect_peer() internal handling a bit. Add a small skb_get_peer_dev() wrapper which utilizes the INDIRECT_CALL_1() macro instead of open coding. Future work could potentially add a peer pointer directly into struct net_device in future and convert veth and netkit over to use it so that eventually ndo_get_peer_dev can be removed. 
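
For reference, the wrapper from include/linux/indirect_call_wrapper.h
roughly expands to a compare-and-branch, so in the common netkit case the
retpoline-protected indirect call is replaced by a direct call (simplified
sketch of the existing macro):

  #define INDIRECT_CALL_1(f, f1, ...) \
          (likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__))

skb_get_peer_dev() passes netkit_peer_dev as the likely target and falls
back to the plain indirect call (e.g. for veth) otherwise.
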
Co-developed-by: Nikolay Aleksandrov Signed-off-by: Nikolay Aleksandrov Signed-off-by: Daniel Borkmann Acked-by: Stanislav Fomichev Link: https://lore.kernel.org/r/20231114004220.6495-7-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- drivers/net/netkit.c | 3 ++- include/net/netkit.h | 6 ++++++ net/core/filter.c | 18 +++++++++++++----- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c index 99de11f9cde5..97bd6705c241 100644 --- a/drivers/net/netkit.c +++ b/drivers/net/netkit.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -177,7 +178,7 @@ out: rcu_read_unlock(); } -static struct net_device *netkit_peer_dev(struct net_device *dev) +INDIRECT_CALLABLE_SCOPE struct net_device *netkit_peer_dev(struct net_device *dev) { return rcu_dereference(netkit_priv(dev)->peer); } diff --git a/include/net/netkit.h b/include/net/netkit.h index 0ba2e6b847ca..9ec0163739f4 100644 --- a/include/net/netkit.h +++ b/include/net/netkit.h @@ -10,6 +10,7 @@ int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog); int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog); int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); +INDIRECT_CALLABLE_DECLARE(struct net_device *netkit_peer_dev(struct net_device *dev)); #else static inline int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) @@ -34,5 +35,10 @@ static inline int netkit_prog_query(const union bpf_attr *attr, { return -EINVAL; } + +static inline struct net_device *netkit_peer_dev(struct net_device *dev) +{ + return NULL; +} #endif /* CONFIG_NETKIT */ #endif /* __NET_NETKIT_H */ diff --git a/net/core/filter.c b/net/core/filter.c index cca810987c8d..7e4d7c3bcc84 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -81,6 +81,7 @@ #include #include #include +#include #include #include "dev.h" @@ -2468,6 +2469,16 @@ static const struct bpf_func_proto bpf_clone_redirect_proto = { DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info); +static struct net_device *skb_get_peer_dev(struct net_device *dev) +{ + const struct net_device_ops *ops = dev->netdev_ops; + + if (likely(ops->ndo_get_peer_dev)) + return INDIRECT_CALL_1(ops->ndo_get_peer_dev, + netkit_peer_dev, dev); + return NULL; +} + int skb_do_redirect(struct sk_buff *skb) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); @@ -2481,12 +2492,9 @@ int skb_do_redirect(struct sk_buff *skb) if (unlikely(!dev)) goto out_drop; if (flags & BPF_F_PEER) { - const struct net_device_ops *ops = dev->netdev_ops; - - if (unlikely(!ops->ndo_get_peer_dev || - !skb_at_tc_ingress(skb))) + if (unlikely(!skb_at_tc_ingress(skb))) goto out_drop; - dev = ops->ndo_get_peer_dev(dev); + dev = skb_get_peer_dev(dev); if (unlikely(!dev || !(dev->flags & IFF_UP) || net_eq(net, dev_net(dev)))) From eee82da79f036bb49ff80d3088b9530e3c2e57eb Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 14 Nov 2023 01:42:19 +0100 Subject: [PATCH 247/299] selftests/bpf: De-veth-ize the tc_redirect test case No functional changes to the test case, but just renaming various functions, variables, etc, to remove veth part of their name for making it more generic and reusable later on (e.g. for netkit). 
Signed-off-by: Daniel Borkmann Acked-by: Stanislav Fomichev Reviewed-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20231114004220.6495-8-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- .../selftests/bpf/prog_tests/tc_redirect.c | 263 +++++++++--------- 1 file changed, 137 insertions(+), 126 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index 6ee22c3b251a..407ff4e9bc78 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -110,11 +110,16 @@ static void netns_setup_namespaces_nofail(const char *verb) } } +enum dev_mode { + MODE_VETH, +}; + struct netns_setup_result { - int ifindex_veth_src; - int ifindex_veth_src_fwd; - int ifindex_veth_dst; - int ifindex_veth_dst_fwd; + enum dev_mode dev_mode; + int ifindex_src; + int ifindex_src_fwd; + int ifindex_dst; + int ifindex_dst_fwd; }; static int get_ifaddr(const char *name, char *ifaddr) @@ -140,55 +145,59 @@ static int get_ifaddr(const char *name, char *ifaddr) static int netns_setup_links_and_routes(struct netns_setup_result *result) { struct nstoken *nstoken = NULL; - char veth_src_fwd_addr[IFADDR_STR_LEN+1] = {}; + char src_fwd_addr[IFADDR_STR_LEN+1] = {}; - SYS(fail, "ip link add veth_src type veth peer name veth_src_fwd"); - SYS(fail, "ip link add veth_dst type veth peer name veth_dst_fwd"); + if (result->dev_mode == MODE_VETH) { + SYS(fail, "ip link add src type veth peer name src_fwd"); + SYS(fail, "ip link add dst type veth peer name dst_fwd"); - SYS(fail, "ip link set veth_dst_fwd address " MAC_DST_FWD); - SYS(fail, "ip link set veth_dst address " MAC_DST); + SYS(fail, "ip link set dst_fwd address " MAC_DST_FWD); + SYS(fail, "ip link set dst address " MAC_DST); + } - if (get_ifaddr("veth_src_fwd", veth_src_fwd_addr)) + if (get_ifaddr("src_fwd", src_fwd_addr)) goto fail; - result->ifindex_veth_src = if_nametoindex("veth_src"); - if (!ASSERT_GT(result->ifindex_veth_src, 0, "ifindex_veth_src")) + result->ifindex_src = if_nametoindex("src"); + if (!ASSERT_GT(result->ifindex_src, 0, "ifindex_src")) goto fail; - result->ifindex_veth_src_fwd = if_nametoindex("veth_src_fwd"); - if (!ASSERT_GT(result->ifindex_veth_src_fwd, 0, "ifindex_veth_src_fwd")) + result->ifindex_src_fwd = if_nametoindex("src_fwd"); + if (!ASSERT_GT(result->ifindex_src_fwd, 0, "ifindex_src_fwd")) goto fail; - result->ifindex_veth_dst = if_nametoindex("veth_dst"); - if (!ASSERT_GT(result->ifindex_veth_dst, 0, "ifindex_veth_dst")) + result->ifindex_dst = if_nametoindex("dst"); + if (!ASSERT_GT(result->ifindex_dst, 0, "ifindex_dst")) goto fail; - result->ifindex_veth_dst_fwd = if_nametoindex("veth_dst_fwd"); - if (!ASSERT_GT(result->ifindex_veth_dst_fwd, 0, "ifindex_veth_dst_fwd")) + result->ifindex_dst_fwd = if_nametoindex("dst_fwd"); + if (!ASSERT_GT(result->ifindex_dst_fwd, 0, "ifindex_dst_fwd")) goto fail; - SYS(fail, "ip link set veth_src netns " NS_SRC); - SYS(fail, "ip link set veth_src_fwd netns " NS_FWD); - SYS(fail, "ip link set veth_dst_fwd netns " NS_FWD); - SYS(fail, "ip link set veth_dst netns " NS_DST); + SYS(fail, "ip link set src netns " NS_SRC); + SYS(fail, "ip link set src_fwd netns " NS_FWD); + SYS(fail, "ip link set dst_fwd netns " NS_FWD); + SYS(fail, "ip link set dst netns " NS_DST); /** setup in 'src' namespace */ nstoken = open_netns(NS_SRC); if (!ASSERT_OK_PTR(nstoken, "setns src")) goto fail; - SYS(fail, "ip addr add " IP4_SRC "/32 dev veth_src"); - SYS(fail, "ip addr add 
" IP6_SRC "/128 dev veth_src nodad"); - SYS(fail, "ip link set dev veth_src up"); + SYS(fail, "ip addr add " IP4_SRC "/32 dev src"); + SYS(fail, "ip addr add " IP6_SRC "/128 dev src nodad"); + SYS(fail, "ip link set dev src up"); - SYS(fail, "ip route add " IP4_DST "/32 dev veth_src scope global"); - SYS(fail, "ip route add " IP4_NET "/16 dev veth_src scope global"); - SYS(fail, "ip route add " IP6_DST "/128 dev veth_src scope global"); + SYS(fail, "ip route add " IP4_DST "/32 dev src scope global"); + SYS(fail, "ip route add " IP4_NET "/16 dev src scope global"); + SYS(fail, "ip route add " IP6_DST "/128 dev src scope global"); - SYS(fail, "ip neigh add " IP4_DST " dev veth_src lladdr %s", - veth_src_fwd_addr); - SYS(fail, "ip neigh add " IP6_DST " dev veth_src lladdr %s", - veth_src_fwd_addr); + if (result->dev_mode == MODE_VETH) { + SYS(fail, "ip neigh add " IP4_DST " dev src lladdr %s", + src_fwd_addr); + SYS(fail, "ip neigh add " IP6_DST " dev src lladdr %s", + src_fwd_addr); + } close_netns(nstoken); @@ -201,15 +210,15 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result) * needs v4 one in order to start ARP probing. IP4_NET route is added * to the endpoints so that the ARP processing will reply. */ - SYS(fail, "ip addr add " IP4_SLL "/32 dev veth_src_fwd"); - SYS(fail, "ip addr add " IP4_DLL "/32 dev veth_dst_fwd"); - SYS(fail, "ip link set dev veth_src_fwd up"); - SYS(fail, "ip link set dev veth_dst_fwd up"); + SYS(fail, "ip addr add " IP4_SLL "/32 dev src_fwd"); + SYS(fail, "ip addr add " IP4_DLL "/32 dev dst_fwd"); + SYS(fail, "ip link set dev src_fwd up"); + SYS(fail, "ip link set dev dst_fwd up"); - SYS(fail, "ip route add " IP4_SRC "/32 dev veth_src_fwd scope global"); - SYS(fail, "ip route add " IP6_SRC "/128 dev veth_src_fwd scope global"); - SYS(fail, "ip route add " IP4_DST "/32 dev veth_dst_fwd scope global"); - SYS(fail, "ip route add " IP6_DST "/128 dev veth_dst_fwd scope global"); + SYS(fail, "ip route add " IP4_SRC "/32 dev src_fwd scope global"); + SYS(fail, "ip route add " IP6_SRC "/128 dev src_fwd scope global"); + SYS(fail, "ip route add " IP4_DST "/32 dev dst_fwd scope global"); + SYS(fail, "ip route add " IP6_DST "/128 dev dst_fwd scope global"); close_netns(nstoken); @@ -218,16 +227,18 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result) if (!ASSERT_OK_PTR(nstoken, "setns dst")) goto fail; - SYS(fail, "ip addr add " IP4_DST "/32 dev veth_dst"); - SYS(fail, "ip addr add " IP6_DST "/128 dev veth_dst nodad"); - SYS(fail, "ip link set dev veth_dst up"); + SYS(fail, "ip addr add " IP4_DST "/32 dev dst"); + SYS(fail, "ip addr add " IP6_DST "/128 dev dst nodad"); + SYS(fail, "ip link set dev dst up"); - SYS(fail, "ip route add " IP4_SRC "/32 dev veth_dst scope global"); - SYS(fail, "ip route add " IP4_NET "/16 dev veth_dst scope global"); - SYS(fail, "ip route add " IP6_SRC "/128 dev veth_dst scope global"); + SYS(fail, "ip route add " IP4_SRC "/32 dev dst scope global"); + SYS(fail, "ip route add " IP4_NET "/16 dev dst scope global"); + SYS(fail, "ip route add " IP6_SRC "/128 dev dst scope global"); - SYS(fail, "ip neigh add " IP4_SRC " dev veth_dst lladdr " MAC_DST_FWD); - SYS(fail, "ip neigh add " IP6_SRC " dev veth_dst lladdr " MAC_DST_FWD); + if (result->dev_mode == MODE_VETH) { + SYS(fail, "ip neigh add " IP4_SRC " dev dst lladdr " MAC_DST_FWD); + SYS(fail, "ip neigh add " IP6_SRC " dev dst lladdr " MAC_DST_FWD); + } close_netns(nstoken); @@ -293,23 +304,23 @@ static int netns_load_bpf(const struct bpf_program 
*src_prog, const struct bpf_program *chk_prog, const struct netns_setup_result *setup_result) { - LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src_fwd); - LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd); + LIBBPF_OPTS(bpf_tc_hook, qdisc_src_fwd); + LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd); int err; - /* tc qdisc add dev veth_src_fwd clsact */ - QDISC_CLSACT_CREATE(&qdisc_veth_src_fwd, setup_result->ifindex_veth_src_fwd); - /* tc filter add dev veth_src_fwd ingress bpf da src_prog */ - XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS, src_prog, 0); - /* tc filter add dev veth_src_fwd egress bpf da chk_prog */ - XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS, chk_prog, 0); + /* tc qdisc add dev src_fwd clsact */ + QDISC_CLSACT_CREATE(&qdisc_src_fwd, setup_result->ifindex_src_fwd); + /* tc filter add dev src_fwd ingress bpf da src_prog */ + XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS, src_prog, 0); + /* tc filter add dev src_fwd egress bpf da chk_prog */ + XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS, chk_prog, 0); - /* tc qdisc add dev veth_dst_fwd clsact */ - QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd); - /* tc filter add dev veth_dst_fwd ingress bpf da dst_prog */ - XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS, dst_prog, 0); - /* tc filter add dev veth_dst_fwd egress bpf da chk_prog */ - XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS, chk_prog, 0); + /* tc qdisc add dev dst_fwd clsact */ + QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd); + /* tc filter add dev dst_fwd ingress bpf da dst_prog */ + XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS, dst_prog, 0); + /* tc filter add dev dst_fwd egress bpf da chk_prog */ + XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS, chk_prog, 0); return 0; fail: @@ -539,10 +550,10 @@ done: static int netns_load_dtime_bpf(struct test_tc_dtime *skel, const struct netns_setup_result *setup_result) { - LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src_fwd); - LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd); - LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src); - LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst); + LIBBPF_OPTS(bpf_tc_hook, qdisc_src_fwd); + LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd); + LIBBPF_OPTS(bpf_tc_hook, qdisc_src); + LIBBPF_OPTS(bpf_tc_hook, qdisc_dst); struct nstoken *nstoken; int err; @@ -550,58 +561,58 @@ static int netns_load_dtime_bpf(struct test_tc_dtime *skel, nstoken = open_netns(NS_SRC); if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC)) return -1; - /* tc qdisc add dev veth_src clsact */ - QDISC_CLSACT_CREATE(&qdisc_veth_src, setup_result->ifindex_veth_src); - /* tc filter add dev veth_src ingress bpf da ingress_host */ - XGRESS_FILTER_ADD(&qdisc_veth_src, BPF_TC_INGRESS, skel->progs.ingress_host, 0); - /* tc filter add dev veth_src egress bpf da egress_host */ - XGRESS_FILTER_ADD(&qdisc_veth_src, BPF_TC_EGRESS, skel->progs.egress_host, 0); + /* tc qdisc add dev src clsact */ + QDISC_CLSACT_CREATE(&qdisc_src, setup_result->ifindex_src); + /* tc filter add dev src ingress bpf da ingress_host */ + XGRESS_FILTER_ADD(&qdisc_src, BPF_TC_INGRESS, skel->progs.ingress_host, 0); + /* tc filter add dev src egress bpf da egress_host */ + XGRESS_FILTER_ADD(&qdisc_src, BPF_TC_EGRESS, skel->progs.egress_host, 0); close_netns(nstoken); /* setup ns_dst tc progs */ nstoken = open_netns(NS_DST); if (!ASSERT_OK_PTR(nstoken, "setns " NS_DST)) return -1; - /* tc qdisc add dev veth_dst clsact */ - QDISC_CLSACT_CREATE(&qdisc_veth_dst, setup_result->ifindex_veth_dst); - /* tc filter add dev veth_dst ingress bpf da 
ingress_host */ - XGRESS_FILTER_ADD(&qdisc_veth_dst, BPF_TC_INGRESS, skel->progs.ingress_host, 0); - /* tc filter add dev veth_dst egress bpf da egress_host */ - XGRESS_FILTER_ADD(&qdisc_veth_dst, BPF_TC_EGRESS, skel->progs.egress_host, 0); + /* tc qdisc add dev dst clsact */ + QDISC_CLSACT_CREATE(&qdisc_dst, setup_result->ifindex_dst); + /* tc filter add dev dst ingress bpf da ingress_host */ + XGRESS_FILTER_ADD(&qdisc_dst, BPF_TC_INGRESS, skel->progs.ingress_host, 0); + /* tc filter add dev dst egress bpf da egress_host */ + XGRESS_FILTER_ADD(&qdisc_dst, BPF_TC_EGRESS, skel->progs.egress_host, 0); close_netns(nstoken); /* setup ns_fwd tc progs */ nstoken = open_netns(NS_FWD); if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD)) return -1; - /* tc qdisc add dev veth_dst_fwd clsact */ - QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd); - /* tc filter add dev veth_dst_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */ - XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS, + /* tc qdisc add dev dst_fwd clsact */ + QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd); + /* tc filter add dev dst_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */ + XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS, skel->progs.ingress_fwdns_prio100, 100); - /* tc filter add dev veth_dst_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */ - XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS, + /* tc filter add dev dst_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */ + XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS, skel->progs.ingress_fwdns_prio101, 101); - /* tc filter add dev veth_dst_fwd egress prio 100 bpf da egress_fwdns_prio100 */ - XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS, + /* tc filter add dev dst_fwd egress prio 100 bpf da egress_fwdns_prio100 */ + XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS, skel->progs.egress_fwdns_prio100, 100); - /* tc filter add dev veth_dst_fwd egress prio 101 bpf da egress_fwdns_prio101 */ - XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS, + /* tc filter add dev dst_fwd egress prio 101 bpf da egress_fwdns_prio101 */ + XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS, skel->progs.egress_fwdns_prio101, 101); - /* tc qdisc add dev veth_src_fwd clsact */ - QDISC_CLSACT_CREATE(&qdisc_veth_src_fwd, setup_result->ifindex_veth_src_fwd); - /* tc filter add dev veth_src_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */ - XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS, + /* tc qdisc add dev src_fwd clsact */ + QDISC_CLSACT_CREATE(&qdisc_src_fwd, setup_result->ifindex_src_fwd); + /* tc filter add dev src_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */ + XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS, skel->progs.ingress_fwdns_prio100, 100); - /* tc filter add dev veth_src_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */ - XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS, + /* tc filter add dev src_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */ + XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS, skel->progs.ingress_fwdns_prio101, 101); - /* tc filter add dev veth_src_fwd egress prio 100 bpf da egress_fwdns_prio100 */ - XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS, + /* tc filter add dev src_fwd egress prio 100 bpf da egress_fwdns_prio100 */ + XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS, skel->progs.egress_fwdns_prio100, 100); - /* tc filter add dev veth_src_fwd egress prio 101 bpf da egress_fwdns_prio101 */ - XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS, + /* tc filter 
add dev src_fwd egress prio 101 bpf da egress_fwdns_prio101 */ + XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS, skel->progs.egress_fwdns_prio101, 101); close_netns(nstoken); return 0; @@ -777,8 +788,8 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result) if (!ASSERT_OK_PTR(skel, "test_tc_dtime__open")) return; - skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd; - skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd; + skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd; + skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd; err = test_tc_dtime__load(skel); if (!ASSERT_OK(err, "test_tc_dtime__load")) @@ -868,8 +879,8 @@ static void test_tc_redirect_neigh(struct netns_setup_result *setup_result) if (!ASSERT_OK_PTR(skel, "test_tc_neigh__open")) goto done; - skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd; - skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd; + skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd; + skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd; err = test_tc_neigh__load(skel); if (!ASSERT_OK(err, "test_tc_neigh__load")) @@ -904,8 +915,8 @@ static void test_tc_redirect_peer(struct netns_setup_result *setup_result) if (!ASSERT_OK_PTR(skel, "test_tc_peer__open")) goto done; - skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd; - skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd; + skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd; + skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd; err = test_tc_peer__load(skel); if (!ASSERT_OK(err, "test_tc_peer__load")) @@ -996,7 +1007,7 @@ static int tun_relay_loop(int src_fd, int target_fd) static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result) { LIBBPF_OPTS(bpf_tc_hook, qdisc_tun_fwd); - LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd); + LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd); struct test_tc_peer *skel = NULL; struct nstoken *nstoken = NULL; int err; @@ -1045,7 +1056,7 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result) goto fail; skel->rodata->IFINDEX_SRC = ifindex; - skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd; + skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd; err = test_tc_peer__load(skel); if (!ASSERT_OK(err, "test_tc_peer__load")) @@ -1053,19 +1064,19 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result) /* Load "tc_src_l3" to the tun_fwd interface to redirect packets * towards dst, and "tc_dst" to redirect packets - * and "tc_chk" on veth_dst_fwd to drop non-redirected packets. + * and "tc_chk" on dst_fwd to drop non-redirected packets. 
*/ /* tc qdisc add dev tun_fwd clsact */ QDISC_CLSACT_CREATE(&qdisc_tun_fwd, ifindex); /* tc filter add dev tun_fwd ingress bpf da tc_src_l3 */ XGRESS_FILTER_ADD(&qdisc_tun_fwd, BPF_TC_INGRESS, skel->progs.tc_src_l3, 0); - /* tc qdisc add dev veth_dst_fwd clsact */ - QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd); - /* tc filter add dev veth_dst_fwd ingress bpf da tc_dst_l3 */ - XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS, skel->progs.tc_dst_l3, 0); - /* tc filter add dev veth_dst_fwd egress bpf da tc_chk */ - XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS, skel->progs.tc_chk, 0); + /* tc qdisc add dev dst_fwd clsact */ + QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd); + /* tc filter add dev dst_fwd ingress bpf da tc_dst_l3 */ + XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS, skel->progs.tc_dst_l3, 0); + /* tc filter add dev dst_fwd egress bpf da tc_chk */ + XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS, skel->progs.tc_chk, 0); /* Setup route and neigh tables */ SYS(fail, "ip -netns " NS_SRC " addr add dev tun_src " IP4_TUN_SRC "/24"); @@ -1074,17 +1085,17 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result) SYS(fail, "ip -netns " NS_SRC " addr add dev tun_src " IP6_TUN_SRC "/64 nodad"); SYS(fail, "ip -netns " NS_FWD " addr add dev tun_fwd " IP6_TUN_FWD "/64 nodad"); - SYS(fail, "ip -netns " NS_SRC " route del " IP4_DST "/32 dev veth_src scope global"); + SYS(fail, "ip -netns " NS_SRC " route del " IP4_DST "/32 dev src scope global"); SYS(fail, "ip -netns " NS_SRC " route add " IP4_DST "/32 via " IP4_TUN_FWD " dev tun_src scope global"); - SYS(fail, "ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev veth_dst scope global"); - SYS(fail, "ip -netns " NS_SRC " route del " IP6_DST "/128 dev veth_src scope global"); + SYS(fail, "ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev dst scope global"); + SYS(fail, "ip -netns " NS_SRC " route del " IP6_DST "/128 dev src scope global"); SYS(fail, "ip -netns " NS_SRC " route add " IP6_DST "/128 via " IP6_TUN_FWD " dev tun_src scope global"); - SYS(fail, "ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev veth_dst scope global"); + SYS(fail, "ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev dst scope global"); - SYS(fail, "ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD); - SYS(fail, "ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD); + SYS(fail, "ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev dst lladdr " MAC_DST_FWD); + SYS(fail, "ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev dst lladdr " MAC_DST_FWD); if (!ASSERT_OK(set_forwarding(false), "disable forwarding")) goto fail; @@ -1106,9 +1117,9 @@ fail: close_netns(nstoken); } -#define RUN_TEST(name) \ +#define RUN_TEST(name, mode) \ ({ \ - struct netns_setup_result setup_result; \ + struct netns_setup_result setup_result = { .dev_mode = mode, }; \ if (test__start_subtest(#name)) \ if (ASSERT_OK(netns_setup_namespaces("add"), "setup namespaces")) { \ if (ASSERT_OK(netns_setup_links_and_routes(&setup_result), \ @@ -1122,11 +1133,11 @@ static void *test_tc_redirect_run_tests(void *arg) { netns_setup_namespaces_nofail("delete"); - RUN_TEST(tc_redirect_peer); - RUN_TEST(tc_redirect_peer_l3); - RUN_TEST(tc_redirect_neigh); - RUN_TEST(tc_redirect_neigh_fib); - RUN_TEST(tc_redirect_dtime); + RUN_TEST(tc_redirect_peer, MODE_VETH); + RUN_TEST(tc_redirect_peer_l3, MODE_VETH); + RUN_TEST(tc_redirect_neigh, MODE_VETH); + 
RUN_TEST(tc_redirect_neigh_fib, MODE_VETH); + RUN_TEST(tc_redirect_dtime, MODE_VETH); return NULL; } From adfeae2d243d9e5b83d094af481d189156b11779 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 14 Nov 2023 01:42:20 +0100 Subject: [PATCH 248/299] selftests/bpf: Add netkit to tc_redirect selftest Extend the existing tc_redirect selftest to also cover netkit devices for exercising the bpf_redirect_peer() code paths, so that we have both veth as well as netkit covered, all tests still pass after this change. Signed-off-by: Daniel Borkmann Acked-by: Stanislav Fomichev Reviewed-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20231114004220.6495-9-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- .../selftests/bpf/prog_tests/tc_redirect.c | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index 407ff4e9bc78..518f143c5b0f 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -24,6 +24,7 @@ #include "test_progs.h" #include "network_helpers.h" +#include "netlink_helpers.h" #include "test_tc_neigh_fib.skel.h" #include "test_tc_neigh.skel.h" #include "test_tc_peer.skel.h" @@ -112,6 +113,7 @@ static void netns_setup_namespaces_nofail(const char *verb) enum dev_mode { MODE_VETH, + MODE_NETKIT, }; struct netns_setup_result { @@ -142,10 +144,51 @@ static int get_ifaddr(const char *name, char *ifaddr) return 0; } +static int create_netkit(int mode, char *prim, char *peer) +{ + struct rtattr *linkinfo, *data, *peer_info; + struct rtnl_handle rth = { .fd = -1 }; + const char *type = "netkit"; + struct { + struct nlmsghdr n; + struct ifinfomsg i; + char buf[1024]; + } req = {}; + int err; + + err = rtnl_open(&rth, 0); + if (!ASSERT_OK(err, "open_rtnetlink")) + return err; + + memset(&req, 0, sizeof(req)); + req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)); + req.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL; + req.n.nlmsg_type = RTM_NEWLINK; + req.i.ifi_family = AF_UNSPEC; + + addattr_l(&req.n, sizeof(req), IFLA_IFNAME, prim, strlen(prim)); + linkinfo = addattr_nest(&req.n, sizeof(req), IFLA_LINKINFO); + addattr_l(&req.n, sizeof(req), IFLA_INFO_KIND, type, strlen(type)); + data = addattr_nest(&req.n, sizeof(req), IFLA_INFO_DATA); + addattr32(&req.n, sizeof(req), IFLA_NETKIT_MODE, mode); + peer_info = addattr_nest(&req.n, sizeof(req), IFLA_NETKIT_PEER_INFO); + req.n.nlmsg_len += sizeof(struct ifinfomsg); + addattr_l(&req.n, sizeof(req), IFLA_IFNAME, peer, strlen(peer)); + addattr_nest_end(&req.n, peer_info); + addattr_nest_end(&req.n, data); + addattr_nest_end(&req.n, linkinfo); + + err = rtnl_talk(&rth, &req.n, NULL); + ASSERT_OK(err, "talk_rtnetlink"); + rtnl_close(&rth); + return err; +} + static int netns_setup_links_and_routes(struct netns_setup_result *result) { struct nstoken *nstoken = NULL; char src_fwd_addr[IFADDR_STR_LEN+1] = {}; + int err; if (result->dev_mode == MODE_VETH) { SYS(fail, "ip link add src type veth peer name src_fwd"); @@ -153,6 +196,13 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result) SYS(fail, "ip link set dst_fwd address " MAC_DST_FWD); SYS(fail, "ip link set dst address " MAC_DST); + } else if (result->dev_mode == MODE_NETKIT) { + err = create_netkit(NETKIT_L3, "src", "src_fwd"); + if (!ASSERT_OK(err, "create_ifindex_src")) + goto fail; + err = create_netkit(NETKIT_L3, "dst", "dst_fwd"); + if (!ASSERT_OK(err, 
"create_ifindex_dst")) + goto fail; } if (get_ifaddr("src_fwd", src_fwd_addr)) @@ -1134,7 +1184,9 @@ static void *test_tc_redirect_run_tests(void *arg) netns_setup_namespaces_nofail("delete"); RUN_TEST(tc_redirect_peer, MODE_VETH); + RUN_TEST(tc_redirect_peer, MODE_NETKIT); RUN_TEST(tc_redirect_peer_l3, MODE_VETH); + RUN_TEST(tc_redirect_peer_l3, MODE_NETKIT); RUN_TEST(tc_redirect_neigh, MODE_VETH); RUN_TEST(tc_redirect_neigh_fib, MODE_VETH); RUN_TEST(tc_redirect_dtime, MODE_VETH); From 977bc146d4eb7070118d8a974919b33bb52732b4 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 21 Nov 2023 04:06:51 +0200 Subject: [PATCH 249/299] selftests/bpf: track tcp payload offset as scalar in xdp_synproxy This change prepares syncookie_{tc,xdp} for update in callbakcs verification logic. To allow bpf_loop() verification converge when multiple callback itreations are considered: - track offset inside TCP payload explicitly, not as a part of the pointer; - make sure that offset does not exceed MAX_PACKET_OFF enforced by verifier; - make sure that offset is tracked as unbound scalar between iterations, otherwise verifier won't be able infer that bpf_loop callback reaches identical states. Acked-by: Andrii Nakryiko Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231121020701.26440-2-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/progs/xdp_synproxy_kern.c | 84 ++++++++++++------- 1 file changed, 52 insertions(+), 32 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c index e959336c7a73..80f620602d50 100644 --- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c +++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c @@ -53,6 +53,8 @@ #define DEFAULT_TTL 64 #define MAX_ALLOWED_PORTS 8 +#define MAX_PACKET_OFF 0xffff + #define swap(a, b) \ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) @@ -183,63 +185,76 @@ static __always_inline __u32 tcp_clock_ms(void) } struct tcpopt_context { - __u8 *ptr; - __u8 *end; + void *data; void *data_end; __be32 *tsecr; __u8 wscale; bool option_timestamp; bool option_sack; + __u32 off; }; +static __always_inline u8 *next(struct tcpopt_context *ctx, __u32 sz) +{ + __u64 off = ctx->off; + __u8 *data; + + /* Verifier forbids access to packet when offset exceeds MAX_PACKET_OFF */ + if (off > MAX_PACKET_OFF - sz) + return NULL; + + data = ctx->data + off; + barrier_var(data); + if (data + sz >= ctx->data_end) + return NULL; + + ctx->off += sz; + return data; +} + static int tscookie_tcpopt_parse(struct tcpopt_context *ctx) { - __u8 opcode, opsize; + __u8 *opcode, *opsize, *wscale, *tsecr; + __u32 off = ctx->off; - if (ctx->ptr >= ctx->end) - return 1; - if (ctx->ptr >= ctx->data_end) + opcode = next(ctx, 1); + if (!opcode) return 1; - opcode = ctx->ptr[0]; - - if (opcode == TCPOPT_EOL) + if (*opcode == TCPOPT_EOL) return 1; - if (opcode == TCPOPT_NOP) { - ++ctx->ptr; + if (*opcode == TCPOPT_NOP) return 0; - } - if (ctx->ptr + 1 >= ctx->end) - return 1; - if (ctx->ptr + 1 >= ctx->data_end) - return 1; - opsize = ctx->ptr[1]; - if (opsize < 2) + opsize = next(ctx, 1); + if (!opsize || *opsize < 2) return 1; - if (ctx->ptr + opsize > ctx->end) - return 1; - - switch (opcode) { + switch (*opcode) { case TCPOPT_WINDOW: - if (opsize == TCPOLEN_WINDOW && ctx->ptr + TCPOLEN_WINDOW <= ctx->data_end) - ctx->wscale = ctx->ptr[2] < TCP_MAX_WSCALE ? 
ctx->ptr[2] : TCP_MAX_WSCALE; + wscale = next(ctx, 1); + if (!wscale) + return 1; + if (*opsize == TCPOLEN_WINDOW) + ctx->wscale = *wscale < TCP_MAX_WSCALE ? *wscale : TCP_MAX_WSCALE; break; case TCPOPT_TIMESTAMP: - if (opsize == TCPOLEN_TIMESTAMP && ctx->ptr + TCPOLEN_TIMESTAMP <= ctx->data_end) { + tsecr = next(ctx, 4); + if (!tsecr) + return 1; + if (*opsize == TCPOLEN_TIMESTAMP) { ctx->option_timestamp = true; /* Client's tsval becomes our tsecr. */ - *ctx->tsecr = get_unaligned((__be32 *)(ctx->ptr + 2)); + *ctx->tsecr = get_unaligned((__be32 *)tsecr); } break; case TCPOPT_SACK_PERM: - if (opsize == TCPOLEN_SACK_PERM) + if (*opsize == TCPOLEN_SACK_PERM) ctx->option_sack = true; break; } - ctx->ptr += opsize; + ctx->off = off + *opsize; return 0; } @@ -256,16 +271,21 @@ static int tscookie_tcpopt_parse_batch(__u32 index, void *context) static __always_inline bool tscookie_init(struct tcphdr *tcp_header, __u16 tcp_len, __be32 *tsval, - __be32 *tsecr, void *data_end) + __be32 *tsecr, void *data, void *data_end) { struct tcpopt_context loop_ctx = { - .ptr = (__u8 *)(tcp_header + 1), - .end = (__u8 *)tcp_header + tcp_len, + .data = data, .data_end = data_end, .tsecr = tsecr, .wscale = TS_OPT_WSCALE_MASK, .option_timestamp = false, .option_sack = false, + /* Note: currently verifier would track .off as unbound scalar. + * In case if verifier would at some point get smarter and + * compute bounded value for this var, beware that it might + * hinder bpf_loop() convergence validation. + */ + .off = (__u8 *)(tcp_header + 1) - (__u8 *)data, }; u32 cookie; @@ -635,7 +655,7 @@ static __always_inline int syncookie_handle_syn(struct header_pointers *hdr, cookie = (__u32)value; if (tscookie_init((void *)hdr->tcp, hdr->tcp_len, - &tsopt_buf[0], &tsopt_buf[1], data_end)) + &tsopt_buf[0], &tsopt_buf[1], data, data_end)) tsopt = tsopt_buf; /* Check that there is enough space for a SYNACK. It also covers From 87eb0152bcc102ecbda866978f4e54db5a3be1ef Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 21 Nov 2023 04:06:52 +0200 Subject: [PATCH 250/299] selftests/bpf: track string payload offset as scalar in strobemeta This change prepares strobemeta for update in callbacks verification logic. To allow bpf_loop() verification converge when multiple callback iterations are considered: - track offset inside strobemeta_payload->payload directly as scalar value; - at each iteration make sure that remaining strobemeta_payload->payload capacity is sufficient for execution of read_{map,str}_var functions; - make sure that offset is tracked as unbound scalar between iterations, otherwise verifier won't be able infer that bpf_loop callback reaches identical states. 
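
A reduced illustration of the pattern (the struct, sizes and the memset
stand-in below are made up for the sketch, and real code also needs the
compiler barriers the patch adds): keep the offset as a plain scalar and
re-check it against the remaining capacity on every callback iteration, so
each iteration starts from a state the verifier can treat as equivalent
while still proving the access in range.

  #define CHUNK 16

  struct loop_ctx {
          __u32 off;
          char buf[256];
  };

  static int copy_one(__u64 index, void *data)
  {
          struct loop_ctx *c = data;

          if (c->off > sizeof(c->buf) - CHUNK)    /* capacity check per iteration */
                  return 1;                       /* stop looping */
          __builtin_memset(&c->buf[c->off], 0, CHUNK);
          c->off += CHUNK;
          return 0;
  }

  /* ... bpf_loop(nr_iters, copy_one, &ctx, 0); */
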
Acked-by: Andrii Nakryiko Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231121020701.26440-3-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- .../testing/selftests/bpf/progs/strobemeta.h | 78 ++++++++++++------- 1 file changed, 48 insertions(+), 30 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index e02cfd380746..40df2cc26eaf 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -24,9 +24,11 @@ struct task_struct {}; #define STACK_TABLE_EPOCH_SHIFT 20 #define STROBE_MAX_STR_LEN 1 #define STROBE_MAX_CFGS 32 +#define READ_MAP_VAR_PAYLOAD_CAP \ + ((1 + STROBE_MAX_MAP_ENTRIES * 2) * STROBE_MAX_STR_LEN) #define STROBE_MAX_PAYLOAD \ (STROBE_MAX_STRS * STROBE_MAX_STR_LEN + \ - STROBE_MAX_MAPS * (1 + STROBE_MAX_MAP_ENTRIES * 2) * STROBE_MAX_STR_LEN) + STROBE_MAX_MAPS * READ_MAP_VAR_PAYLOAD_CAP) struct strobe_value_header { /* @@ -355,7 +357,7 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg, size_t idx, void *tls_base, struct strobe_value_generic *value, struct strobemeta_payload *data, - void *payload) + size_t off) { void *location; uint64_t len; @@ -366,7 +368,7 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg, return 0; bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location); - len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, value->ptr); + len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, value->ptr); /* * if bpf_probe_read_user_str returns error (<0), due to casting to * unsinged int, it will become big number, so next check is @@ -378,14 +380,14 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg, return 0; data->str_lens[idx] = len; - return len; + return off + len; } -static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, - size_t idx, void *tls_base, - struct strobe_value_generic *value, - struct strobemeta_payload *data, - void *payload) +static __always_inline uint64_t read_map_var(struct strobemeta_cfg *cfg, + size_t idx, void *tls_base, + struct strobe_value_generic *value, + struct strobemeta_payload *data, + size_t off) { struct strobe_map_descr* descr = &data->map_descrs[idx]; struct strobe_map_raw map; @@ -397,11 +399,11 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, location = calc_location(&cfg->map_locs[idx], tls_base); if (!location) - return payload; + return off; bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location); if (bpf_probe_read_user(&map, sizeof(struct strobe_map_raw), value->ptr)) - return payload; + return off; descr->id = map.id; descr->cnt = map.cnt; @@ -410,10 +412,10 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, data->req_meta_valid = 1; } - len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.tag); + len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, map.tag); if (len <= STROBE_MAX_STR_LEN) { descr->tag_len = len; - payload += len; + off += len; } #ifdef NO_UNROLL @@ -426,22 +428,22 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, break; descr->key_lens[i] = 0; - len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, + len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, map.entries[i].key); if (len <= STROBE_MAX_STR_LEN) { descr->key_lens[i] = len; - payload += len; + off += len; } descr->val_lens[i] = 0; - len = 
bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, + len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, map.entries[i].val); if (len <= STROBE_MAX_STR_LEN) { descr->val_lens[i] = len; - payload += len; + off += len; } } - return payload; + return off; } #ifdef USE_BPF_LOOP @@ -455,14 +457,20 @@ struct read_var_ctx { struct strobemeta_payload *data; void *tls_base; struct strobemeta_cfg *cfg; - void *payload; + size_t payload_off; /* value gets mutated */ struct strobe_value_generic *value; enum read_type type; }; -static int read_var_callback(__u32 index, struct read_var_ctx *ctx) +static int read_var_callback(__u64 index, struct read_var_ctx *ctx) { + /* lose precision info for ctx->payload_off, verifier won't track + * double xor, barrier_var() is needed to force clang keep both xors. + */ + ctx->payload_off ^= index; + barrier_var(ctx->payload_off); + ctx->payload_off ^= index; switch (ctx->type) { case READ_INT_VAR: if (index >= STROBE_MAX_INTS) @@ -472,14 +480,18 @@ static int read_var_callback(__u32 index, struct read_var_ctx *ctx) case READ_MAP_VAR: if (index >= STROBE_MAX_MAPS) return 1; - ctx->payload = read_map_var(ctx->cfg, index, ctx->tls_base, - ctx->value, ctx->data, ctx->payload); + if (ctx->payload_off > sizeof(ctx->data->payload) - READ_MAP_VAR_PAYLOAD_CAP) + return 1; + ctx->payload_off = read_map_var(ctx->cfg, index, ctx->tls_base, + ctx->value, ctx->data, ctx->payload_off); break; case READ_STR_VAR: if (index >= STROBE_MAX_STRS) return 1; - ctx->payload += read_str_var(ctx->cfg, index, ctx->tls_base, - ctx->value, ctx->data, ctx->payload); + if (ctx->payload_off > sizeof(ctx->data->payload) - STROBE_MAX_STR_LEN) + return 1; + ctx->payload_off = read_str_var(ctx->cfg, index, ctx->tls_base, + ctx->value, ctx->data, ctx->payload_off); break; } return 0; @@ -501,7 +513,8 @@ static void *read_strobe_meta(struct task_struct *task, pid_t pid = bpf_get_current_pid_tgid() >> 32; struct strobe_value_generic value = {0}; struct strobemeta_cfg *cfg; - void *tls_base, *payload; + size_t payload_off; + void *tls_base; cfg = bpf_map_lookup_elem(&strobemeta_cfgs, &pid); if (!cfg) @@ -509,7 +522,7 @@ static void *read_strobe_meta(struct task_struct *task, data->int_vals_set_mask = 0; data->req_meta_valid = 0; - payload = data->payload; + payload_off = 0; /* * we don't have struct task_struct definition, it should be: * tls_base = (void *)task->thread.fsbase; @@ -522,7 +535,7 @@ static void *read_strobe_meta(struct task_struct *task, .tls_base = tls_base, .value = &value, .data = data, - .payload = payload, + .payload_off = 0, }; int err; @@ -540,6 +553,11 @@ static void *read_strobe_meta(struct task_struct *task, err = bpf_loop(STROBE_MAX_MAPS, read_var_callback, &ctx, 0); if (err != STROBE_MAX_MAPS) return NULL; + + payload_off = ctx.payload_off; + /* this should not really happen, here only to satisfy verifer */ + if (payload_off > sizeof(data->payload)) + payload_off = sizeof(data->payload); #else #ifdef NO_UNROLL #pragma clang loop unroll(disable) @@ -555,7 +573,7 @@ static void *read_strobe_meta(struct task_struct *task, #pragma unroll #endif /* NO_UNROLL */ for (int i = 0; i < STROBE_MAX_STRS; ++i) { - payload += read_str_var(cfg, i, tls_base, &value, data, payload); + payload_off = read_str_var(cfg, i, tls_base, &value, data, payload_off); } #ifdef NO_UNROLL #pragma clang loop unroll(disable) @@ -563,7 +581,7 @@ static void *read_strobe_meta(struct task_struct *task, #pragma unroll #endif /* NO_UNROLL */ for (int i = 0; i < STROBE_MAX_MAPS; ++i) { - payload 
= read_map_var(cfg, i, tls_base, &value, data, payload); + payload_off = read_map_var(cfg, i, tls_base, &value, data, payload_off); } #endif /* USE_BPF_LOOP */ @@ -571,7 +589,7 @@ static void *read_strobe_meta(struct task_struct *task, * return pointer right after end of payload, so it's possible to * calculate exact amount of useful data that needs to be sent */ - return payload; + return &data->payload[payload_off]; } SEC("raw_tracepoint/kfree_skb") From f40bfd1679446b22d321e64a1fa98b7d07d2be08 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 21 Nov 2023 04:06:53 +0200 Subject: [PATCH 251/299] selftests/bpf: fix bpf_loop_bench for new callback verification scheme MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a preparatory change. A follow-up patch "bpf: verify callbacks as if they are called unknown number of times" changes the logic for callback handling. While previously callbacks were verified as a single function call, the new scheme takes into account that callbacks could be executed an unknown number of times. This has dire implications for bpf_loop_bench: SEC("fentry/" SYS_PREFIX "sys_getpgid") int benchmark(void *ctx) { for (int i = 0; i < 1000; i++) { bpf_loop(nr_loops, empty_callback, NULL, 0); __sync_add_and_fetch(&hits, nr_loops); } return 0; } Without the callbacks change, the verifier sees this as 1000 calls to empty_callback(). However, with the callbacks change, things become exponential: - i=0: state exploring empty_callback is scheduled with i=0 (a); - i=1: state exploring empty_callback is scheduled with i=1; ... - i=999: state exploring empty_callback is scheduled with i=999; - state (a) is popped from stack; - i=1: state exploring empty_callback is scheduled with i=1; ... Avoid this issue by rewriting the outer loop as bpf_loop().
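For reference, the rewritten benchmark then takes the shape below (a condensed sketch assembled from the diff that follows; nr_loops, hits, SYS_PREFIX and empty_callback already exist in bpf_loop_bench.c):

    static int empty_callback(__u32 index, void *data)
    {
            return 0;
    }

    static int outer_loop(__u32 index, void *data)
    {
            /* the body of the old for-loop, now driven by bpf_loop() */
            bpf_loop(nr_loops, empty_callback, NULL, 0);
            __sync_add_and_fetch(&hits, nr_loops);
            return 0;
    }

    SEC("fentry/" SYS_PREFIX "sys_getpgid")
    int benchmark(void *ctx)
    {
            /* the outer for-loop becomes a bpf_loop() as well */
            bpf_loop(1000, outer_loop, NULL, 0);
            return 0;
    }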
Unfortunately, this adds a function call to a loop at runtime, which negatively affects performance: throughput latency before: 149.919 ± 0.168 M ops/s, 6.670 ns/op after : 137.040 ± 0.187 M ops/s, 7.297 ns/op Acked-by: Andrii Nakryiko Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231121020701.26440-4-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/bpf_loop_bench.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/bpf_loop_bench.c b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c index 4ce76eb064c4..d461746fd3c1 100644 --- a/tools/testing/selftests/bpf/progs/bpf_loop_bench.c +++ b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c @@ -15,13 +15,16 @@ static int empty_callback(__u32 index, void *data) return 0; } +static int outer_loop(__u32 index, void *data) +{ + bpf_loop(nr_loops, empty_callback, NULL, 0); + __sync_add_and_fetch(&hits, nr_loops); + return 0; +} + SEC("fentry/" SYS_PREFIX "sys_getpgid") int benchmark(void *ctx) { - for (int i = 0; i < 1000; i++) { - bpf_loop(nr_loops, empty_callback, NULL, 0); - - __sync_add_and_fetch(&hits, nr_loops); - } + bpf_loop(1000, outer_loop, NULL, 0); return 0; } From 683b96f9606ab7308ffb23c46ab43cecdef8a241 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 21 Nov 2023 04:06:54 +0200 Subject: [PATCH 252/299] bpf: extract __check_reg_arg() utility function Split check_reg_arg() into two utility functions: - check_reg_arg() operating on registers from current verifier state; - __check_reg_arg() operating on a specific set of registers passed as a parameter; The __check_reg_arg() function would be used by a follow-up change for callbacks handling. Acked-by: Andrii Nakryiko Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231121020701.26440-5-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6da370a047fe..e6e1bcfe00f5 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3439,13 +3439,11 @@ static void mark_insn_zext(struct bpf_verifier_env *env, reg->subreg_def = DEF_NOT_SUBREG; } -static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, - enum reg_arg_type t) +static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno, + enum reg_arg_type t) { - struct bpf_verifier_state *vstate = env->cur_state; - struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; - struct bpf_reg_state *reg, *regs = state->regs; + struct bpf_reg_state *reg; bool rw64; if (regno >= MAX_BPF_REG) { @@ -3486,6 +3484,15 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, return 0; } +static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, + enum reg_arg_type t) +{ + struct bpf_verifier_state *vstate = env->cur_state; + struct bpf_func_state *state = vstate->frame[vstate->curframe]; + + return __check_reg_arg(env, state->regs, regno, t); +} + static void mark_jmp_point(struct bpf_verifier_env *env, int idx) { env->insn_aux_data[idx].jmp_point = true; @@ -9350,7 +9357,7 @@ static void clear_caller_saved_regs(struct bpf_verifier_env *env, /* after the call registers r0 - r5 were scratched */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); - check_reg_arg(env, caller_saved[i], 
DST_OP_NO_MARK); + __check_reg_arg(env, regs, caller_saved[i], DST_OP_NO_MARK); } } From 58124a98cb8eda69d248d7f1de954c8b2767c945 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 21 Nov 2023 04:06:55 +0200 Subject: [PATCH 253/299] bpf: extract setup_func_entry() utility function Move code for simulated stack frame creation to a separate utility function. This function would be used in the follow-up change for callbacks handling. Acked-by: Andrii Nakryiko Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231121020701.26440-6-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 84 ++++++++++++++++++++++++------------------- 1 file changed, 48 insertions(+), 36 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e6e1bcfe00f5..68ee4803d3a2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -9370,11 +9370,10 @@ static int set_callee_state(struct bpf_verifier_env *env, struct bpf_func_state *caller, struct bpf_func_state *callee, int insn_idx); -static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, - int *insn_idx, int subprog, - set_callee_state_fn set_callee_state_cb) +static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int callsite, + set_callee_state_fn set_callee_state_cb, + struct bpf_verifier_state *state) { - struct bpf_verifier_state *state = env->cur_state; struct bpf_func_state *caller, *callee; int err; @@ -9384,13 +9383,53 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn return -E2BIG; } - caller = state->frame[state->curframe]; if (state->frame[state->curframe + 1]) { verbose(env, "verifier bug. Frame %d already allocated\n", state->curframe + 1); return -EFAULT; } + caller = state->frame[state->curframe]; + callee = kzalloc(sizeof(*callee), GFP_KERNEL); + if (!callee) + return -ENOMEM; + state->frame[state->curframe + 1] = callee; + + /* callee cannot access r0, r6 - r9 for reading and has to write + * into its own stack before reading from it. + * callee can read/write into caller's stack + */ + init_func_state(env, callee, + /* remember the callsite, it will be used by bpf_exit */ + callsite, + state->curframe + 1 /* frameno within this callchain */, + subprog /* subprog number within this prog */); + /* Transfer references to the callee */ + err = copy_reference_state(callee, caller); + err = err ?: set_callee_state_cb(env, caller, callee, callsite); + if (err) + goto err_out; + + /* only increment it after check_reg_arg() finished */ + state->curframe++; + + return 0; + +err_out: + free_func_state(callee); + state->frame[state->curframe + 1] = NULL; + return err; +} + +static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, + int *insn_idx, int subprog, + set_callee_state_fn set_callee_state_cb) +{ + struct bpf_verifier_state *state = env->cur_state; + struct bpf_func_state *caller, *callee; + int err; + + caller = state->frame[state->curframe]; err = btf_check_subprog_call(env, subprog, caller->regs); if (err == -EFAULT) return err; @@ -9460,35 +9499,12 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn return 0; } - callee = kzalloc(sizeof(*callee), GFP_KERNEL); - if (!callee) - return -ENOMEM; - state->frame[state->curframe + 1] = callee; - - /* callee cannot access r0, r6 - r9 for reading and has to write - * into its own stack before reading from it. 
- * callee can read/write into caller's stack - */ - init_func_state(env, callee, - /* remember the callsite, it will be used by bpf_exit */ - *insn_idx /* callsite */, - state->curframe + 1 /* frameno within this callchain */, - subprog /* subprog number within this prog */); - - /* Transfer references to the callee */ - err = copy_reference_state(callee, caller); + err = setup_func_entry(env, subprog, *insn_idx, set_callee_state_cb, state); if (err) - goto err_out; - - err = set_callee_state_cb(env, caller, callee, *insn_idx); - if (err) - goto err_out; + return err; clear_caller_saved_regs(env, caller->regs); - /* only increment it after check_reg_arg() finished */ - state->curframe++; - /* and go analyze first insn of the callee */ *insn_idx = env->subprog_info[subprog].start - 1; @@ -9496,14 +9512,10 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn verbose(env, "caller:\n"); print_verifier_state(env, caller, true); verbose(env, "callee:\n"); - print_verifier_state(env, callee, true); + print_verifier_state(env, state->frame[state->curframe], true); } - return 0; -err_out: - free_func_state(callee); - state->frame[state->curframe + 1] = NULL; - return err; + return 0; } int map_set_for_each_callback_args(struct bpf_verifier_env *env, From ab5cfac139ab8576fb54630d4cca23c3e690ee90 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 21 Nov 2023 04:06:56 +0200 Subject: [PATCH 254/299] bpf: verify callbacks as if they are called unknown number of times Prior to this patch callbacks were handled as regular function calls, execution of callback body was modeled exactly once. This patch updates callbacks handling logic as follows: - introduces a function push_callback_call() that schedules callback body verification in env->head stack; - updates prepare_func_exit() to reschedule callback body verification upon BPF_EXIT; - as calls to bpf_*_iter_next(), calls to callback invoking functions are marked as checkpoints; - is_state_visited() is updated to stop callback based iteration when some identical parent state is found. 
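To make the effect concrete, here is a minimal sketch of the "zero iterations" situation (mirroring the unsafe_on_zero_iter selftest added later in this series; the usual BPF selftest headers are assumed):

    struct num_context { __u64 i; };

    __u8 choice_arr[2] = { 0, 1 };

    static int unsafe_on_zero_iter_cb(__u32 idx, struct num_context *ctx)
    {
            ctx->i = 0;
            return 0;
    }

    SEC("?raw_tp")
    int unsafe_on_zero_iter(void *unused)
    {
            struct num_context loop_ctx = { .i = 32 };

            bpf_loop(100, unsafe_on_zero_iter_cb, &loop_ctx, 0);
            /* If the callback runs zero times, loop_ctx.i is still 32 and the
             * access below is out of bounds. The new scheme explores this
             * zero-iterations path first and rejects the program.
             */
            return choice_arr[loop_ctx.i];
    }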
Paths where the callback function is invoked zero times are now verified first, which makes it necessary to modify some selftests: - the following negative tests required adding release/unlock/drop calls to avoid previously masked unrelated error reports: - cb_refs.c:underflow_prog - exceptions_fail.c:reject_with_rbtree_add_throw - exceptions_fail.c:reject_with_cb_reference - the following precision tracking selftests needed changes in the expected log trace: - verifier_subprog_precision.c:callback_result_precise (note: r0 precision is no longer propagated inside the callback, and I think this is correct behavior) - verifier_subprog_precision.c:parent_callee_saved_reg_precise_with_callback - verifier_subprog_precision.c:parent_stack_slot_precise_with_callback Reported-by: Andrew Werner Closes: https://lore.kernel.org/bpf/CA+vRuzPChFNXmouzGG+wsy=6eMcfr1mFG0F3g7rbg-sedGKW3w@mail.gmail.com/ Acked-by: Andrii Nakryiko Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231121020701.26440-7-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 5 + kernel/bpf/verifier.c | 274 +++++++++++------- tools/testing/selftests/bpf/progs/cb_refs.c | 1 + .../selftests/bpf/progs/exceptions_fail.c | 2 + .../bpf/progs/verifier_subprog_precision.c | 71 ++++- 5 files changed, 240 insertions(+), 113 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 24213a99cc79..dd326936dd6f 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -400,6 +400,7 @@ struct bpf_verifier_state { struct bpf_idx_pair *jmp_history; u32 jmp_history_cnt; u32 dfs_depth; + u32 callback_unroll_depth; }; #define bpf_get_spilled_reg(slot, frame, mask) \ @@ -511,6 +512,10 @@ struct bpf_insn_aux_data { * this instruction, regardless of any heuristics */ bool force_checkpoint; + /* true if instruction is a call to a helper function that + * accepts callback function as a parameter.
+ */ + bool calls_callback; }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 68ee4803d3a2..a60dfa56ebb3 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -547,13 +547,12 @@ static bool is_dynptr_ref_function(enum bpf_func_id func_id) return func_id == BPF_FUNC_dynptr_data; } -static bool is_callback_calling_kfunc(u32 btf_id); +static bool is_sync_callback_calling_kfunc(u32 btf_id); static bool is_bpf_throw_kfunc(struct bpf_insn *insn); -static bool is_callback_calling_function(enum bpf_func_id func_id) +static bool is_sync_callback_calling_function(enum bpf_func_id func_id) { return func_id == BPF_FUNC_for_each_map_elem || - func_id == BPF_FUNC_timer_set_callback || func_id == BPF_FUNC_find_vma || func_id == BPF_FUNC_loop || func_id == BPF_FUNC_user_ringbuf_drain; @@ -564,6 +563,18 @@ static bool is_async_callback_calling_function(enum bpf_func_id func_id) return func_id == BPF_FUNC_timer_set_callback; } +static bool is_callback_calling_function(enum bpf_func_id func_id) +{ + return is_sync_callback_calling_function(func_id) || + is_async_callback_calling_function(func_id); +} + +static bool is_sync_callback_calling_insn(struct bpf_insn *insn) +{ + return (bpf_helper_call(insn) && is_sync_callback_calling_function(insn->imm)) || + (bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm)); +} + static bool is_storage_get_function(enum bpf_func_id func_id) { return func_id == BPF_FUNC_sk_storage_get || @@ -1808,6 +1819,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, dst_state->first_insn_idx = src->first_insn_idx; dst_state->last_insn_idx = src->last_insn_idx; dst_state->dfs_depth = src->dfs_depth; + dst_state->callback_unroll_depth = src->callback_unroll_depth; dst_state->used_as_loop_entry = src->used_as_loop_entry; for (i = 0; i <= src->curframe; i++) { dst = dst_state->frame[i]; @@ -3731,6 +3743,8 @@ static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask) } } +static bool calls_callback(struct bpf_verifier_env *env, int insn_idx); + /* For given verifier state backtrack_insn() is called from the last insn to * the first insn. Its purpose is to compute a bitmask of registers and * stack slots that needs precision in the parent verifier state. @@ -3906,16 +3920,13 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, return -EFAULT; return 0; } - } else if ((bpf_helper_call(insn) && - is_callback_calling_function(insn->imm) && - !is_async_callback_calling_function(insn->imm)) || - (bpf_pseudo_kfunc_call(insn) && is_callback_calling_kfunc(insn->imm))) { - /* callback-calling helper or kfunc call, which means - * we are exiting from subprog, but unlike the subprog - * call handling above, we shouldn't propagate - * precision of r1-r5 (if any requested), as they are - * not actually arguments passed directly to callback - * subprogs + } else if (is_sync_callback_calling_insn(insn) && idx != subseq_idx - 1) { + /* exit from callback subprog to callback-calling helper or + * kfunc call. Use idx/subseq_idx check to discern it from + * straight line code backtracking. 
+ * Unlike the subprog call handling above, we shouldn't + * propagate precision of r1-r5 (if any requested), as they are + * not actually arguments passed directly to callback subprogs */ if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) { verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); @@ -3950,10 +3961,18 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, } else if (opcode == BPF_EXIT) { bool r0_precise; + /* Backtracking to a nested function call, 'idx' is a part of + * the inner frame 'subseq_idx' is a part of the outer frame. + * In case of a regular function call, instructions giving + * precision to registers R1-R5 should have been found already. + * In case of a callback, it is ok to have R1-R5 marked for + * backtracking, as these registers are set by the function + * invoking callback. + */ + if (subseq_idx >= 0 && calls_callback(env, subseq_idx)) + for (i = BPF_REG_1; i <= BPF_REG_5; i++) + bt_clear_reg(bt, i); if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) { - /* if backtracing was looking for registers R1-R5 - * they should have been found already. - */ verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); WARN_ONCE(1, "verifier backtracking bug"); return -EFAULT; @@ -9421,11 +9440,11 @@ err_out: return err; } -static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, - int *insn_idx, int subprog, - set_callee_state_fn set_callee_state_cb) +static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn, + int insn_idx, int subprog, + set_callee_state_fn set_callee_state_cb) { - struct bpf_verifier_state *state = env->cur_state; + struct bpf_verifier_state *state = env->cur_state, *callback_state; struct bpf_func_state *caller, *callee; int err; @@ -9433,44 +9452,22 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn err = btf_check_subprog_call(env, subprog, caller->regs); if (err == -EFAULT) return err; - if (subprog_is_global(env, subprog)) { - if (err) { - verbose(env, "Caller passes invalid args into func#%d\n", - subprog); - return err; - } else { - if (env->log.level & BPF_LOG_LEVEL) - verbose(env, - "Func#%d is global and valid. 
Skipping.\n", - subprog); - clear_caller_saved_regs(env, caller->regs); - - /* All global functions return a 64-bit SCALAR_VALUE */ - mark_reg_unknown(env, caller->regs, BPF_REG_0); - caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; - - /* continue with next insn after call */ - return 0; - } - } /* set_callee_state is used for direct subprog calls, but we are * interested in validating only BPF helpers that can call subprogs as * callbacks */ - if (set_callee_state_cb != set_callee_state) { - env->subprog_info[subprog].is_cb = true; - if (bpf_pseudo_kfunc_call(insn) && - !is_callback_calling_kfunc(insn->imm)) { - verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n", - func_id_name(insn->imm), insn->imm); - return -EFAULT; - } else if (!bpf_pseudo_kfunc_call(insn) && - !is_callback_calling_function(insn->imm)) { /* helper */ - verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n", - func_id_name(insn->imm), insn->imm); - return -EFAULT; - } + env->subprog_info[subprog].is_cb = true; + if (bpf_pseudo_kfunc_call(insn) && + !is_sync_callback_calling_kfunc(insn->imm)) { + verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n", + func_id_name(insn->imm), insn->imm); + return -EFAULT; + } else if (!bpf_pseudo_kfunc_call(insn) && + !is_callback_calling_function(insn->imm)) { /* helper */ + verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n", + func_id_name(insn->imm), insn->imm); + return -EFAULT; } if (insn->code == (BPF_JMP | BPF_CALL) && @@ -9481,25 +9478,76 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn /* there is no real recursion here. timer callbacks are async */ env->subprog_info[subprog].is_async_cb = true; async_cb = push_async_cb(env, env->subprog_info[subprog].start, - *insn_idx, subprog); + insn_idx, subprog); if (!async_cb) return -EFAULT; callee = async_cb->frame[0]; callee->async_entry_cnt = caller->async_entry_cnt + 1; /* Convert bpf_timer_set_callback() args into timer callback args */ - err = set_callee_state_cb(env, caller, callee, *insn_idx); + err = set_callee_state_cb(env, caller, callee, insn_idx); if (err) return err; + return 0; + } + + /* for callback functions enqueue entry to callback and + * proceed with next instruction within current frame. + */ + callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false); + if (!callback_state) + return -ENOMEM; + + err = setup_func_entry(env, subprog, insn_idx, set_callee_state_cb, + callback_state); + if (err) + return err; + + callback_state->callback_unroll_depth++; + return 0; +} + +static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, + int *insn_idx) +{ + struct bpf_verifier_state *state = env->cur_state; + struct bpf_func_state *caller; + int err, subprog, target_insn; + + target_insn = *insn_idx + insn->imm + 1; + subprog = find_subprog(env, target_insn); + if (subprog < 0) { + verbose(env, "verifier bug. No program starts at insn %d\n", target_insn); + return -EFAULT; + } + + caller = state->frame[state->curframe]; + err = btf_check_subprog_call(env, subprog, caller->regs); + if (err == -EFAULT) + return err; + if (subprog_is_global(env, subprog)) { + if (err) { + verbose(env, "Caller passes invalid args into func#%d\n", subprog); + return err; + } + + if (env->log.level & BPF_LOG_LEVEL) + verbose(env, "Func#%d is global and valid. 
Skipping.\n", subprog); clear_caller_saved_regs(env, caller->regs); + + /* All global functions return a 64-bit SCALAR_VALUE */ mark_reg_unknown(env, caller->regs, BPF_REG_0); caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; + /* continue with next insn after call */ return 0; } - err = setup_func_entry(env, subprog, *insn_idx, set_callee_state_cb, state); + /* for regular function entry setup new frame and continue + * from that frame. + */ + err = setup_func_entry(env, subprog, *insn_idx, set_callee_state, state); if (err) return err; @@ -9559,22 +9607,6 @@ static int set_callee_state(struct bpf_verifier_env *env, return 0; } -static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, - int *insn_idx) -{ - int subprog, target_insn; - - target_insn = *insn_idx + insn->imm + 1; - subprog = find_subprog(env, target_insn); - if (subprog < 0) { - verbose(env, "verifier bug. No program starts at insn %d\n", - target_insn); - return -EFAULT; - } - - return __check_func_call(env, insn, insn_idx, subprog, set_callee_state); -} - static int set_map_elem_callback_state(struct bpf_verifier_env *env, struct bpf_func_state *caller, struct bpf_func_state *callee, @@ -9798,6 +9830,11 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) verbose_invalid_scalar(env, r0, &range, "callback return", "R0"); return -EINVAL; } + if (!calls_callback(env, callee->callsite)) { + verbose(env, "BUG: in callback at %d, callsite %d !calls_callback\n", + *insn_idx, callee->callsite); + return -EFAULT; + } } else { /* return to the caller whatever r0 had in the callee */ caller->regs[BPF_REG_0] = *r0; @@ -9815,7 +9852,15 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) return err; } - *insn_idx = callee->callsite + 1; + /* for callbacks like bpf_loop or bpf_for_each_map_elem go back to callsite, + * there function call logic would reschedule callback visit. If iteration + * converges is_state_visited() would prune that visit eventually. 
+ */ + if (callee->in_callback_fn) + *insn_idx = callee->callsite; + else + *insn_idx = callee->callsite + 1; + if (env->log.level & BPF_LOG_LEVEL) { verbose(env, "returning from callee:\n"); print_verifier_state(env, callee, true); @@ -10228,24 +10273,24 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn } break; case BPF_FUNC_for_each_map_elem: - err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, - set_map_elem_callback_state); + err = push_callback_call(env, insn, insn_idx, meta.subprogno, + set_map_elem_callback_state); break; case BPF_FUNC_timer_set_callback: - err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, - set_timer_callback_state); + err = push_callback_call(env, insn, insn_idx, meta.subprogno, + set_timer_callback_state); break; case BPF_FUNC_find_vma: - err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, - set_find_vma_callback_state); + err = push_callback_call(env, insn, insn_idx, meta.subprogno, + set_find_vma_callback_state); break; case BPF_FUNC_snprintf: err = check_bpf_snprintf_call(env, regs); break; case BPF_FUNC_loop: update_loop_inline_state(env, meta.subprogno); - err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, - set_loop_callback_state); + err = push_callback_call(env, insn, insn_idx, meta.subprogno, + set_loop_callback_state); break; case BPF_FUNC_dynptr_from_mem: if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) { @@ -10341,8 +10386,8 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn break; } case BPF_FUNC_user_ringbuf_drain: - err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, - set_user_ringbuf_callback_state); + err = push_callback_call(env, insn, insn_idx, meta.subprogno, + set_user_ringbuf_callback_state); break; } @@ -11230,7 +11275,7 @@ static bool is_bpf_graph_api_kfunc(u32 btf_id) btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]; } -static bool is_callback_calling_kfunc(u32 btf_id) +static bool is_sync_callback_calling_kfunc(u32 btf_id) { return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]; } @@ -11982,6 +12027,21 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, return -EACCES; } + /* Check the arguments */ + err = check_kfunc_args(env, &meta, insn_idx); + if (err < 0) + return err; + + if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { + err = push_callback_call(env, insn, insn_idx, meta.subprogno, + set_rbtree_add_callback_state); + if (err) { + verbose(env, "kfunc %s#%d failed callback verification\n", + func_name, meta.func_id); + return err; + } + } + rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta); rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta); @@ -12017,10 +12077,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, return -EINVAL; } - /* Check the arguments */ - err = check_kfunc_args(env, &meta, insn_idx); - if (err < 0) - return err; /* In case of release function, we get register number of refcounted * PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now. 
*/ @@ -12054,16 +12110,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, } } - if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { - err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, - set_rbtree_add_callback_state); - if (err) { - verbose(env, "kfunc %s#%d failed callback verification\n", - func_name, meta.func_id); - return err; - } - } - if (meta.func_id == special_kfunc_list[KF_bpf_throw]) { if (!bpf_jit_supports_exceptions()) { verbose(env, "JIT does not support calling kfunc %s#%d\n", @@ -15427,6 +15473,15 @@ static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx) return env->insn_aux_data[insn_idx].force_checkpoint; } +static void mark_calls_callback(struct bpf_verifier_env *env, int idx) +{ + env->insn_aux_data[idx].calls_callback = true; +} + +static bool calls_callback(struct bpf_verifier_env *env, int insn_idx) +{ + return env->insn_aux_data[insn_idx].calls_callback; +} enum { DONE_EXPLORING = 0, @@ -15540,6 +15595,21 @@ static int visit_insn(int t, struct bpf_verifier_env *env) * async state will be pushed for further exploration. */ mark_prune_point(env, t); + /* For functions that invoke callbacks it is not known how many times + * callback would be called. Verifier models callback calling functions + * by repeatedly visiting callback bodies and returning to origin call + * instruction. + * In order to stop such iteration verifier needs to identify when a + * state identical some state from a previous iteration is reached. + * Check below forces creation of checkpoint before callback calling + * instruction to allow search for such identical states. + */ + if (is_sync_callback_calling_insn(insn)) { + mark_calls_callback(env, t); + mark_force_checkpoint(env, t); + mark_prune_point(env, t); + mark_jmp_point(env, t); + } if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { struct bpf_kfunc_call_arg_meta meta; @@ -17009,10 +17079,16 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) } goto skip_inf_loop_check; } + if (calls_callback(env, insn_idx)) { + if (states_equal(env, &sl->state, cur, true)) + goto hit; + goto skip_inf_loop_check; + } /* attempt to detect infinite loop to avoid unnecessary doomed work */ if (states_maybe_looping(&sl->state, cur) && states_equal(env, &sl->state, cur, false) && - !iter_active_depths_differ(&sl->state, cur)) { + !iter_active_depths_differ(&sl->state, cur) && + sl->state.callback_unroll_depth == cur->callback_unroll_depth) { verbose_linfo(env, insn_idx, "; "); verbose(env, "infinite loop detected at insn %d\n", insn_idx); verbose(env, "cur state:"); diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c index 76d661b20e87..56c764df8196 100644 --- a/tools/testing/selftests/bpf/progs/cb_refs.c +++ b/tools/testing/selftests/bpf/progs/cb_refs.c @@ -33,6 +33,7 @@ int underflow_prog(void *ctx) if (!p) return 0; bpf_for_each_map_elem(&array_map, cb1, &p, 0); + bpf_kfunc_call_test_release(p); return 0; } diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c index 4c39e920dac2..8c0ef2742208 100644 --- a/tools/testing/selftests/bpf/progs/exceptions_fail.c +++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c @@ -171,6 +171,7 @@ int reject_with_rbtree_add_throw(void *ctx) return 0; bpf_spin_lock(&lock); bpf_rbtree_add(&rbtree, &f->node, rbless); + bpf_spin_unlock(&lock); return 0; } @@ -214,6 +215,7 @@ int reject_with_cb_reference(void *ctx) 
if (!f) return 0; bpf_loop(5, subprog_cb_ref, NULL, 0); + bpf_obj_drop(f); return 0; } diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c index db6b3143338b..da803cffb5ef 100644 --- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c +++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c @@ -119,15 +119,26 @@ __naked int global_subprog_result_precise(void) SEC("?raw_tp") __success __log_level(2) +/* First simulated path does not include callback body */ __msg("14: (0f) r1 += r6") -__msg("mark_precise: frame0: last_idx 14 first_idx 10") +__msg("mark_precise: frame0: last_idx 14 first_idx 9") __msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7") __msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4") __msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4") __msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0") -__msg("mark_precise: frame0: parent state regs=r0 stack=:") -__msg("mark_precise: frame0: last_idx 18 first_idx 0") -__msg("mark_precise: frame0: regs=r0 stack= before 18: (95) exit") +__msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop") +/* State entering callback body popped from states stack */ +__msg("from 9 to 17: frame1:") +__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb") +__msg("17: (b7) r0 = 0") +__msg("18: (95) exit") +__msg("returning from callee:") +__msg("to caller at 9:") +/* r4 (flags) is always precise for bpf_loop() */ +__msg("frame 0: propagating r4") +__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1") +__msg("mark_precise: frame0: regs=r4 stack= before 18: (95) exit") +__msg("from 18 to 9: safe") __naked int callback_result_precise(void) { asm volatile ( @@ -233,20 +244,36 @@ __naked int parent_callee_saved_reg_precise_global(void) SEC("?raw_tp") __success __log_level(2) +/* First simulated path does not include callback body */ __msg("12: (0f) r1 += r6") -__msg("mark_precise: frame0: last_idx 12 first_idx 10") +__msg("mark_precise: frame0: last_idx 12 first_idx 9") __msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7") __msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 9: (85) call bpf_loop") __msg("mark_precise: frame0: parent state regs=r6 stack=:") -__msg("mark_precise: frame0: last_idx 16 first_idx 0") -__msg("mark_precise: frame0: regs=r6 stack= before 16: (95) exit") -__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0") -__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop#181") +__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9") __msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0") __msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0") __msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8") __msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1") __msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3") +/* State entering callback body popped from states stack */ +__msg("from 9 to 15: frame1:") +__msg("15: frame1: R1=scalar() R2=0 R10=fp0 cb") +__msg("15: (b7) r0 = 0") +__msg("16: (95) exit") +__msg("returning from callee:") +__msg("to caller at 9:") +/* r4 (flags) is always precise for bpf_loop(), + * r6 was marked before backtracking to callback body. 
+ */ +__msg("frame 0: propagating r4,r6") +__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1") +__msg("mark_precise: frame0: regs=r4,r6 stack= before 16: (95) exit") +__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0") +__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop") +__msg("mark_precise: frame0: parent state regs= stack=:") +__msg("from 16 to 9: safe") __naked int parent_callee_saved_reg_precise_with_callback(void) { asm volatile ( @@ -373,22 +400,38 @@ __naked int parent_stack_slot_precise_global(void) SEC("?raw_tp") __success __log_level(2) +/* First simulated path does not include callback body */ __msg("14: (0f) r1 += r6") -__msg("mark_precise: frame0: last_idx 14 first_idx 11") +__msg("mark_precise: frame0: last_idx 14 first_idx 10") __msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7") __msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4") __msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)") +__msg("mark_precise: frame0: regs= stack=-8 before 10: (85) call bpf_loop") __msg("mark_precise: frame0: parent state regs= stack=-8:") -__msg("mark_precise: frame0: last_idx 18 first_idx 0") -__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit") -__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0") -__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181") +__msg("mark_precise: frame0: last_idx 9 first_idx 0 subseq_idx 10") __msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0") __msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0") __msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8") __msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6") __msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6") __msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3") +/* State entering callback body popped from states stack */ +__msg("from 10 to 17: frame1:") +__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb") +__msg("17: (b7) r0 = 0") +__msg("18: (95) exit") +__msg("returning from callee:") +__msg("to caller at 10:") +/* r4 (flags) is always precise for bpf_loop(), + * fp-8 was marked before backtracking to callback body. + */ +__msg("frame 0: propagating r4,fp-8") +__msg("mark_precise: frame0: last_idx 10 first_idx 10 subseq_idx -1") +__msg("mark_precise: frame0: regs=r4 stack=-8 before 18: (95) exit") +__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0") +__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181") +__msg("mark_precise: frame0: parent state regs= stack=:") +__msg("from 18 to 10: safe") __naked int parent_stack_slot_precise_with_callback(void) { asm volatile ( From 958465e217dbf5fc6677d42d8827fb3073d86afd Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 21 Nov 2023 04:06:57 +0200 Subject: [PATCH 255/299] selftests/bpf: tests for iterating callbacks A set of test cases to check behavior of callback handling logic, check if verifier catches the following situations: - program not safe on second callback iteration; - program not safe on zero callback iterations; - infinite loop inside a callback. Verify that callback logic works for bpf_loop, bpf_for_each_map_elem, bpf_user_ringbuf_drain, bpf_find_vma. 
Acked-by: Andrii Nakryiko Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231121020701.26440-8-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/verifier.c | 2 + .../bpf/progs/verifier_iterating_callbacks.c | 147 ++++++++++++++++++ 2 files changed, 149 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index e5c61aa6604a..5cfa7a6316b6 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -31,6 +31,7 @@ #include "verifier_helper_restricted.skel.h" #include "verifier_helper_value_access.skel.h" #include "verifier_int_ptr.skel.h" +#include "verifier_iterating_callbacks.skel.h" #include "verifier_jeq_infer_not_null.skel.h" #include "verifier_ld_ind.skel.h" #include "verifier_ldsx.skel.h" @@ -139,6 +140,7 @@ void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_acces void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); } void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); } void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); } +void test_verifier_iterating_callbacks(void) { RUN(verifier_iterating_callbacks); } void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); } void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } void test_verifier_ldsx(void) { RUN(verifier_ldsx); } diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c new file mode 100644 index 000000000000..fa9429f77a81 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 8); + __type(key, __u32); + __type(value, __u64); +} map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_USER_RINGBUF); + __uint(max_entries, 8); +} ringbuf SEC(".maps"); + +struct vm_area_struct; +struct bpf_map; + +struct buf_context { + char *buf; +}; + +struct num_context { + __u64 i; +}; + +__u8 choice_arr[2] = { 0, 1 }; + +static int unsafe_on_2nd_iter_cb(__u32 idx, struct buf_context *ctx) +{ + if (idx == 0) { + ctx->buf = (char *)(0xDEAD); + return 0; + } + + if (bpf_probe_read_user(ctx->buf, 8, (void *)(0xBADC0FFEE))) + return 1; + + return 0; +} + +SEC("?raw_tp") +__failure __msg("R1 type=scalar expected=fp") +int unsafe_on_2nd_iter(void *unused) +{ + char buf[4]; + struct buf_context loop_ctx = { .buf = buf }; + + bpf_loop(100, unsafe_on_2nd_iter_cb, &loop_ctx, 0); + return 0; +} + +static int unsafe_on_zero_iter_cb(__u32 idx, struct num_context *ctx) +{ + ctx->i = 0; + return 0; +} + +SEC("?raw_tp") +__failure __msg("invalid access to map value, value_size=2 off=32 size=1") +int unsafe_on_zero_iter(void *unused) +{ + struct num_context loop_ctx = { .i = 32 }; + + bpf_loop(100, unsafe_on_zero_iter_cb, &loop_ctx, 0); + return choice_arr[loop_ctx.i]; +} + +static int loop_detection_cb(__u32 idx, struct num_context *ctx) +{ + for (;;) {} + return 0; +} + +SEC("?raw_tp") +__failure __msg("infinite loop detected") +int loop_detection(void *unused) +{ + struct num_context loop_ctx = { .i = 0 }; + + bpf_loop(100, loop_detection_cb, &loop_ctx, 0); + return 0; +} + +static 
__always_inline __u64 oob_state_machine(struct num_context *ctx) +{ + switch (ctx->i) { + case 0: + ctx->i = 1; + break; + case 1: + ctx->i = 32; + break; + } + return 0; +} + +static __u64 for_each_map_elem_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data) +{ + return oob_state_machine(data); +} + +SEC("?raw_tp") +__failure __msg("invalid access to map value, value_size=2 off=32 size=1") +int unsafe_for_each_map_elem(void *unused) +{ + struct num_context loop_ctx = { .i = 0 }; + + bpf_for_each_map_elem(&map, for_each_map_elem_cb, &loop_ctx, 0); + return choice_arr[loop_ctx.i]; +} + +static __u64 ringbuf_drain_cb(struct bpf_dynptr *dynptr, void *data) +{ + return oob_state_machine(data); +} + +SEC("?raw_tp") +__failure __msg("invalid access to map value, value_size=2 off=32 size=1") +int unsafe_ringbuf_drain(void *unused) +{ + struct num_context loop_ctx = { .i = 0 }; + + bpf_user_ringbuf_drain(&ringbuf, ringbuf_drain_cb, &loop_ctx, 0); + return choice_arr[loop_ctx.i]; +} + +static __u64 find_vma_cb(struct task_struct *task, struct vm_area_struct *vma, void *data) +{ + return oob_state_machine(data); +} + +SEC("?raw_tp") +__failure __msg("invalid access to map value, value_size=2 off=32 size=1") +int unsafe_find_vma(void *unused) +{ + struct task_struct *task = bpf_get_current_task_btf(); + struct num_context loop_ctx = { .i = 0 }; + + bpf_find_vma(task, 0, find_vma_cb, &loop_ctx, 0); + return choice_arr[loop_ctx.i]; +} + +char _license[] SEC("license") = "GPL"; From cafe2c21508a38cdb3ed22708842e957b2572c3e Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 21 Nov 2023 04:06:58 +0200 Subject: [PATCH 256/299] bpf: widening for callback iterators Callbacks are similar to open coded iterators, so add imprecise widening logic for callback body processing. This makes callback based loops behave identically to open coded iterators, e.g. allowing to verify programs like below: struct ctx { u32 i; }; int cb(u32 idx, struct ctx* ctx) { ++ctx->i; return 0; } ... struct ctx ctx = { .i = 0 }; bpf_loop(100, cb, &ctx, 0); ... Acked-by: Andrii Nakryiko Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231121020701.26440-9-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a60dfa56ebb3..2f03e6b11bb9 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -9799,9 +9799,10 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env) static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) { - struct bpf_verifier_state *state = env->cur_state; + struct bpf_verifier_state *state = env->cur_state, *prev_st; struct bpf_func_state *caller, *callee; struct bpf_reg_state *r0; + bool in_callback_fn; int err; callee = state->frame[state->curframe]; @@ -9856,7 +9857,8 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) * there function call logic would reschedule callback visit. If iteration * converges is_state_visited() would prune that visit eventually. */ - if (callee->in_callback_fn) + in_callback_fn = callee->in_callback_fn; + if (in_callback_fn) *insn_idx = callee->callsite; else *insn_idx = callee->callsite + 1; @@ -9871,6 +9873,24 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) * bpf_throw, this will be done by copy_verifier_state for extra frames. 
*/ free_func_state(callee); state->frame[state->curframe--] = NULL; + + /* for callbacks widen imprecise scalars to make programs like below verify: + * + * struct ctx { int i; } + * void cb(int idx, struct ctx *ctx) { ctx->i++; ... } + * ... + * struct ctx = { .i = 0; } + * bpf_loop(100, cb, &ctx, 0); + * + * This is similar to what is done in process_iter_next_call() for open + * coded iterators. + */ + prev_st = in_callback_fn ? find_prev_entry(env, state, *insn_idx) : NULL; + if (prev_st) { + err = widen_imprecise_scalars(env, prev_st, state); + if (err) + return err; + } return 0; } From 9f3330aa644d6d979eb064c46e85c62d4b4eac75 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 21 Nov 2023 04:06:59 +0200 Subject: [PATCH 257/299] selftests/bpf: test widening for iterating callbacks A test case to verify that imprecise scalar widening is applied to the callback entering state when the callback call is simulated repeatedly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231121020701.26440-10-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- .../bpf/progs/verifier_iterating_callbacks.c | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c index fa9429f77a81..598c1e984b26 100644 --- a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c +++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c @@ -25,6 +25,7 @@ struct buf_context { struct num_context { __u64 i; + __u64 j; }; __u8 choice_arr[2] = { 0, 1 }; @@ -69,6 +70,25 @@ int unsafe_on_zero_iter(void *unused) return choice_arr[loop_ctx.i]; } +static int widening_cb(__u32 idx, struct num_context *ctx) +{ + ++ctx->i; + return 0; +} + +SEC("?raw_tp") +__success +int widening(void *unused) +{ + struct num_context loop_ctx = { .i = 0, .j = 1 }; + + bpf_loop(100, widening_cb, &loop_ctx, 0); + /* loop_ctx.j is not changed during callback iteration, + * verifier should not apply widening to it. + */ + return choice_arr[loop_ctx.j]; +} + static int loop_detection_cb(__u32 idx, struct num_context *ctx) { for (;;) {} return 0; } From bb124da69c47dd98d69361ec13244ece50bec63e Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 21 Nov 2023 04:07:00 +0200 Subject: [PATCH 258/299] bpf: keep track of max number of bpf_loop callback iterations In some cases the verifier can't infer convergence of the bpf_loop() iteration. E.g. for the following program: static int cb(__u32 idx, struct num_context* ctx) { ctx->i++; return 0; } SEC("?raw_tp") int prog(void *_) { struct num_context ctx = { .i = 0 }; __u8 choice_arr[2] = { 0, 1 }; bpf_loop(2, cb, &ctx, 0); return choice_arr[ctx.i]; } Each 'cb' simulation would eventually return to 'prog' and reach the 'return choice_arr[ctx.i]' statement. At that point ctx.i would be marked precise, thus forcing the verifier to track a multitude of separate states with {.i=0}, {.i=1}, ... at bpf_loop() callback entry. This commit allows "brute force" handling for such cases by limiting the number of callback body simulations using the 'umax' value of the first bpf_loop() parameter. For this, extend bpf_func_state with a 'callback_depth' field. Increment this field when a callback visiting state is pushed to the states traversal stack. For frame #N, its 'callback_depth' field counts how many times the callback with frame depth N+1 has been executed.
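As a rough illustration of the per-frame counting (the function names here are made up; the shape mirrors the nested-loop selftest added later in this series):

    static int level2_cb(__u32 idx, void *ctx)
    {
            return 0;       /* body simulated in frame N+2 */
    }

    static int level1_cb(__u32 idx, void *ctx)
    {
            /* This body runs in frame N+1. Simulations of level2_cb are
             * counted in frame N+1's callback_depth, independently of how
             * many times level1_cb itself has been simulated (frame N).
             */
            bpf_loop(1, level2_cb, ctx, 0);
            return 0;
    }

    SEC("?raw_tp")
    int prog(void *unused)
    {
            /* Simulations of level1_cb are counted in this frame (frame N),
             * bounded by the umax of the first argument, i.e. 2.
             */
            bpf_loop(2, level1_cb, NULL, 0);
            return 0;
    }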
Use bpf_func_state specifically to allow independent tracking of callback depths when multiple nested bpf_loop() calls are present. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231121020701.26440-11-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 11 ++++++ kernel/bpf/verifier.c | 19 ++++++++-- .../bpf/progs/verifier_subprog_precision.c | 35 +++++++++++++------ 3 files changed, 53 insertions(+), 12 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index dd326936dd6f..aa4d19d0bc94 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -301,6 +301,17 @@ struct bpf_func_state { struct tnum callback_ret_range; bool in_async_callback_fn; bool in_exception_callback_fn; + /* For callback calling functions that limit number of possible + * callback executions (e.g. bpf_loop) keeps track of current + * simulated iteration number. + * Value in frame N refers to number of times callback with frame + * N+1 was simulated, e.g. for the following call: + * + * bpf_loop(..., fn, ...); | suppose current frame is N + * | fn would be simulated in frame N+1 + * | number of simulations is tracked in frame N + */ + u32 callback_depth; /* The following fields should be last. See copy_func_state() */ int acquired_refs; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 2f03e6b11bb9..af2819d5c8ee 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -9505,6 +9505,8 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins return err; callback_state->callback_unroll_depth++; + callback_state->frame[callback_state->curframe - 1]->callback_depth++; + caller->callback_depth = 0; return 0; } @@ -10309,8 +10311,21 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn break; case BPF_FUNC_loop: update_loop_inline_state(env, meta.subprogno); - err = push_callback_call(env, insn, insn_idx, meta.subprogno, - set_loop_callback_state); + /* Verifier relies on R1 value to determine if bpf_loop() iteration + * is finished, thus mark it precise. + */ + err = mark_chain_precision(env, BPF_REG_1); + if (err) + return err; + if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) { + err = push_callback_call(env, insn, insn_idx, meta.subprogno, + set_loop_callback_state); + } else { + cur_func(env)->callback_depth = 0; + if (env->log.level & BPF_LOG_LEVEL2) + verbose(env, "frame%d bpf_loop iteration limit reached\n", + env->cur_state->curframe); + } break; case BPF_FUNC_dynptr_from_mem: if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) { diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c index da803cffb5ef..f61d623b1ce8 100644 --- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c +++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c @@ -119,7 +119,23 @@ __naked int global_subprog_result_precise(void) SEC("?raw_tp") __success __log_level(2) -/* First simulated path does not include callback body */ +/* First simulated path does not include callback body, + * r1 and r4 are always precise for bpf_loop() calls. 
+ */ +__msg("9: (85) call bpf_loop#181") +__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1") +__msg("mark_precise: frame0: parent state regs=r4 stack=:") +__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9") +__msg("mark_precise: frame0: regs=r4 stack= before 8: (b7) r4 = 0") +__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1") +__msg("mark_precise: frame0: parent state regs=r1 stack=:") +__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9") +__msg("mark_precise: frame0: regs=r1 stack= before 8: (b7) r4 = 0") +__msg("mark_precise: frame0: regs=r1 stack= before 7: (b7) r3 = 0") +__msg("mark_precise: frame0: regs=r1 stack= before 6: (bf) r2 = r8") +__msg("mark_precise: frame0: regs=r1 stack= before 5: (bf) r1 = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3") +/* r6 precision propagation */ __msg("14: (0f) r1 += r6") __msg("mark_precise: frame0: last_idx 14 first_idx 9") __msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7") @@ -134,10 +150,9 @@ __msg("17: (b7) r0 = 0") __msg("18: (95) exit") __msg("returning from callee:") __msg("to caller at 9:") -/* r4 (flags) is always precise for bpf_loop() */ -__msg("frame 0: propagating r4") +__msg("frame 0: propagating r1,r4") __msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1") -__msg("mark_precise: frame0: regs=r4 stack= before 18: (95) exit") +__msg("mark_precise: frame0: regs=r1,r4 stack= before 18: (95) exit") __msg("from 18 to 9: safe") __naked int callback_result_precise(void) { @@ -264,12 +279,12 @@ __msg("15: (b7) r0 = 0") __msg("16: (95) exit") __msg("returning from callee:") __msg("to caller at 9:") -/* r4 (flags) is always precise for bpf_loop(), +/* r1, r4 are always precise for bpf_loop(), * r6 was marked before backtracking to callback body. */ -__msg("frame 0: propagating r4,r6") +__msg("frame 0: propagating r1,r4,r6") __msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1") -__msg("mark_precise: frame0: regs=r4,r6 stack= before 16: (95) exit") +__msg("mark_precise: frame0: regs=r1,r4,r6 stack= before 16: (95) exit") __msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0") __msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop") __msg("mark_precise: frame0: parent state regs= stack=:") @@ -422,12 +437,12 @@ __msg("17: (b7) r0 = 0") __msg("18: (95) exit") __msg("returning from callee:") __msg("to caller at 10:") -/* r4 (flags) is always precise for bpf_loop(), +/* r1, r4 are always precise for bpf_loop(), * fp-8 was marked before backtracking to callback body. */ -__msg("frame 0: propagating r4,fp-8") +__msg("frame 0: propagating r1,r4,fp-8") __msg("mark_precise: frame0: last_idx 10 first_idx 10 subseq_idx -1") -__msg("mark_precise: frame0: regs=r4 stack=-8 before 18: (95) exit") +__msg("mark_precise: frame0: regs=r1,r4 stack=-8 before 18: (95) exit") __msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0") __msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181") __msg("mark_precise: frame0: parent state regs= stack=:") From 57e2a52deeb12ab84c15c6d0fb93638b5b94001b Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 21 Nov 2023 04:07:01 +0200 Subject: [PATCH 259/299] selftests/bpf: check if max number of bpf_loop iterations is tracked Check that even if bpf_loop() callback simulation does not converge to a specific state, verification could proceed via "brute force" simulation of maximal number of callback calls. 
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231121020701.26440-12-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- .../bpf/progs/verifier_iterating_callbacks.c | 75 +++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c index 598c1e984b26..5905e036e0ea 100644 --- a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c +++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c @@ -164,4 +164,79 @@ int unsafe_find_vma(void *unused) return choice_arr[loop_ctx.i]; } +static int iter_limit_cb(__u32 idx, struct num_context *ctx) +{ + ctx->i++; + return 0; +} + +SEC("?raw_tp") +__success +int bpf_loop_iter_limit_ok(void *unused) +{ + struct num_context ctx = { .i = 0 }; + + bpf_loop(1, iter_limit_cb, &ctx, 0); + return choice_arr[ctx.i]; +} + +SEC("?raw_tp") +__failure __msg("invalid access to map value, value_size=2 off=2 size=1") +int bpf_loop_iter_limit_overflow(void *unused) +{ + struct num_context ctx = { .i = 0 }; + + bpf_loop(2, iter_limit_cb, &ctx, 0); + return choice_arr[ctx.i]; +} + +static int iter_limit_level2a_cb(__u32 idx, struct num_context *ctx) +{ + ctx->i += 100; + return 0; +} + +static int iter_limit_level2b_cb(__u32 idx, struct num_context *ctx) +{ + ctx->i += 10; + return 0; +} + +static int iter_limit_level1_cb(__u32 idx, struct num_context *ctx) +{ + ctx->i += 1; + bpf_loop(1, iter_limit_level2a_cb, ctx, 0); + bpf_loop(1, iter_limit_level2b_cb, ctx, 0); + return 0; +} + +/* Check that path visiting every callback function once had been + * reached by verifier. Variables 'ctx{1,2}i' below serve as flags, + * with each decimal digit corresponding to a callback visit marker. + */ +SEC("socket") +__success __retval(111111) +int bpf_loop_iter_limit_nested(void *unused) +{ + struct num_context ctx1 = { .i = 0 }; + struct num_context ctx2 = { .i = 0 }; + __u64 a, b, c; + + bpf_loop(1, iter_limit_level1_cb, &ctx1, 0); + bpf_loop(1, iter_limit_level1_cb, &ctx2, 0); + a = ctx1.i; + b = ctx2.i; + /* Force 'ctx1.i' and 'ctx2.i' precise. */ + c = choice_arr[(a + b) % 2]; + /* This makes 'c' zero, but neither clang nor verifier know it. */ + c /= 10; + /* Make sure that verifier does not visit 'impossible' states: + * enumerate all possible callback visit masks. + */ + if (a != 0 && a != 1 && a != 11 && a != 101 && a != 111 && + b != 0 && b != 1 && b != 11 && b != 101 && b != 111) + asm volatile ("r0 /= 0;" ::: "r0"); + return 1000 * a + b + c; +} + char _license[] SEC("license") = "GPL"; From d3ec75bc635cb0cb8185b63293d33a3d1b942d22 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 21 Nov 2023 15:03:25 +0800 Subject: [PATCH 260/299] LoongArch: Add dependency between vmlinuz.efi and vmlinux.efi MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A common issue in Makefile is a race in parallel building. You need to be careful to prevent multiple threads from writing to the same file simultaneously. Commit 3939f3345050 ("ARM: 8418/1: add boot image dependencies to not generate invalid images") addressed such a bad scenario. 
A similar symptom occurs with the following command: $ make -j$(nproc) ARCH=loongarch vmlinux.efi vmlinuz.efi [ snip ] SORTTAB vmlinux OBJCOPY arch/loongarch/boot/vmlinux.efi OBJCOPY arch/loongarch/boot/vmlinux.efi PAD arch/loongarch/boot/vmlinux.bin GZIP arch/loongarch/boot/vmlinuz OBJCOPY arch/loongarch/boot/vmlinuz.o LD arch/loongarch/boot/vmlinuz.efi.elf OBJCOPY arch/loongarch/boot/vmlinuz.efi The log "OBJCOPY arch/loongarch/boot/vmlinux.efi" is displayed twice. It indicates that two threads simultaneously enter arch/loongarch/boot/ and write to arch/loongarch/boot/vmlinux.efi. It occasionally leads to a build failure: $ make -j$(nproc) ARCH=loongarch vmlinux.efi vmlinuz.efi [ snip ] SORTTAB vmlinux OBJCOPY arch/loongarch/boot/vmlinux.efi PAD arch/loongarch/boot/vmlinux.bin truncate: Invalid number: ‘arch/loongarch/boot/vmlinux.bin’ make[2]: *** [drivers/firmware/efi/libstub/Makefile.zboot:13: arch/loongarch/boot/vmlinux.bin] Error 1 make[2]: *** Deleting file 'arch/loongarch/boot/vmlinux.bin' make[1]: *** [arch/loongarch/Makefile:146: vmlinuz.efi] Error 2 make[1]: *** Waiting for unfinished jobs.... make: *** [Makefile:234: __sub-make] Error 2 vmlinuz.efi depends on vmlinux.efi, but such a dependency is not specified in arch/loongarch/Makefile. Signed-off-by: Masahiro Yamada Signed-off-by: Huacai Chen --- arch/loongarch/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index 9eeb0c05f3f4..a8d65eaf1211 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -142,6 +142,8 @@ vdso-install-y += arch/loongarch/vdso/vdso.so.dbg all: $(notdir $(KBUILD_IMAGE)) +vmlinuz.efi: vmlinux.efi + vmlinux.elf vmlinux.efi vmlinuz.efi: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $(boot)/$@ From cbfd44bd5c6eec0aada0c39130f0b8d7ecba0529 Mon Sep 17 00:00:00 2001 From: WANG Rui Date: Tue, 21 Nov 2023 15:03:25 +0800 Subject: [PATCH 261/299] LoongArch: Explicitly set -fdirect-access-external-data for vmlinux After this llvm commit [1], The -fno-pic does not imply direct access external data. Explicitly set -fdirect-access-external-data for vmlinux that can avoids GOT entries. Link: https://github.com/llvm/llvm-project/commit/47eeee297775347cbdb7624d6a766c2a3eec4a59 Suggested-by: Xi Ruoyao Signed-off-by: WANG Rui Signed-off-by: Huacai Chen --- arch/loongarch/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index a8d65eaf1211..204b94b2e6aa 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -68,6 +68,7 @@ LDFLAGS_vmlinux += -static -n -nostdlib ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS cflags-y += $(call cc-option,-mexplicit-relocs) KBUILD_CFLAGS_KERNEL += $(call cc-option,-mdirect-extern-access) +KBUILD_CFLAGS_KERNEL += $(call cc-option,-fdirect-access-external-data) KBUILD_AFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data) KBUILD_CFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data) KBUILD_AFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax) From aa0cbc1b506b090c3a775b547c693ada108cc0d7 Mon Sep 17 00:00:00 2001 From: WANG Rui Date: Tue, 21 Nov 2023 15:03:25 +0800 Subject: [PATCH 262/299] LoongArch: Record pc instead of offset in la_abs relocation To clarify, the previous version functioned flawlessly. However, it's worth noting that the LLVM's LoongArch backend currently lacks support for cross-section label calculations. With this patch, we enable the use of clang to compile relocatable kernels. 
Tested-by: Nathan Chancellor Signed-off-by: WANG Rui Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/asmmacro.h | 3 +-- arch/loongarch/include/asm/setup.h | 2 +- arch/loongarch/kernel/relocate.c | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h index c9544f358c33..655db7d7a427 100644 --- a/arch/loongarch/include/asm/asmmacro.h +++ b/arch/loongarch/include/asm/asmmacro.h @@ -609,8 +609,7 @@ lu32i.d \reg, 0 lu52i.d \reg, \reg, 0 .pushsection ".la_abs", "aw", %progbits - 768: - .dword 768b-766b + .dword 766b .dword \sym .popsection #endif diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h index a0bc159ce8bd..ee52fb1e9963 100644 --- a/arch/loongarch/include/asm/setup.h +++ b/arch/loongarch/include/asm/setup.h @@ -25,7 +25,7 @@ extern void set_merr_handler(unsigned long offset, void *addr, unsigned long len #ifdef CONFIG_RELOCATABLE struct rela_la_abs { - long offset; + long pc; long symvalue; }; diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c index 6c3eff9af9fb..288b739ca88d 100644 --- a/arch/loongarch/kernel/relocate.c +++ b/arch/loongarch/kernel/relocate.c @@ -52,7 +52,7 @@ static inline void __init relocate_absolute(long random_offset) for (p = begin; (void *)p < end; p++) { long v = p->symvalue; uint32_t lu12iw, ori, lu32id, lu52id; - union loongarch_instruction *insn = (void *)p - p->offset; + union loongarch_instruction *insn = (void *)p->pc; lu12iw = (v >> 12) & 0xfffff; ori = v & 0xfff; From ee2daf7102f42007b7bf5dbd1ae76478ee62c512 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Tue, 21 Nov 2023 15:03:25 +0800 Subject: [PATCH 263/299] LoongArch: Add __percpu annotation for __percpu_read()/__percpu_write() When build kernel with C=1, we get: arch/loongarch/kernel/process.c:234:46: warning: incorrect type in argument 1 (different address spaces) arch/loongarch/kernel/process.c:234:46: expected void *ptr arch/loongarch/kernel/process.c:234:46: got unsigned long [noderef] __percpu * arch/loongarch/kernel/process.c:234:46: warning: incorrect type in argument 1 (different address spaces) arch/loongarch/kernel/process.c:234:46: expected void *ptr arch/loongarch/kernel/process.c:234:46: got unsigned long [noderef] __percpu * arch/loongarch/kernel/process.c:234:46: warning: incorrect type in argument 1 (different address spaces) arch/loongarch/kernel/process.c:234:46: expected void *ptr arch/loongarch/kernel/process.c:234:46: got unsigned long [noderef] __percpu * arch/loongarch/kernel/process.c:234:46: warning: incorrect type in argument 1 (different address spaces) arch/loongarch/kernel/process.c:234:46: expected void *ptr arch/loongarch/kernel/process.c:234:46: got unsigned long [noderef] __percpu * Add __percpu annotation for __percpu_read()/__percpu_write() can avoid such warnings. __percpu_xchg() and other functions don't need annotation because their wrapper, i.e. _pcp_protect(), already suppresses warnings. Also adjust the indentations in this file. 
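For readers unfamiliar with sparse address spaces, here is a minimal sketch (hypothetical names, not taken from this patch) of why carrying the __percpu tag through the accessor prototype silences the warnings: the callers pass pointers to per-CPU variables, so the parameter must live in the same address space. Preemption handling is omitted for brevity.

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, demo_counter);

    /* The prototype carries the same __percpu tag as the pointers the
     * callers pass, so "make C=1" sees matching address spaces. */
    static unsigned long demo_percpu_read(void __percpu *ptr)
    {
            /* this_cpu_ptr() yields a plain pointer to this CPU's copy */
            return *(unsigned long *)this_cpu_ptr(ptr);
    }

    static unsigned long demo_get(void)
    {
            /* &demo_counter has type 'unsigned long __percpu *' */
            return demo_percpu_read(&demo_counter);
    }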
Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202311080409.LlOfTR3m-lkp@intel.com/ Closes: https://lore.kernel.org/oe-kbuild-all/202311080840.Vc2kXhfp-lkp@intel.com/ Closes: https://lore.kernel.org/oe-kbuild-all/202311081340.3k72KKdg-lkp@intel.com/ Closes: https://lore.kernel.org/oe-kbuild-all/202311120926.cjYHyoYw-lkp@intel.com/ Closes: https://lore.kernel.org/oe-kbuild-all/202311152142.g6UyNx1R-lkp@intel.com/ Closes: https://lore.kernel.org/oe-kbuild-all/202311160339.DbhaH8LX-lkp@intel.com/ Closes: https://lore.kernel.org/oe-kbuild-all/202311181454.CTPrSYmQ-lkp@intel.com/ Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/percpu.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h index ed5da02b1cf6..9b36ac003f89 100644 --- a/arch/loongarch/include/asm/percpu.h +++ b/arch/loongarch/include/asm/percpu.h @@ -40,13 +40,13 @@ static __always_inline unsigned long __percpu_##op(void *ptr, \ switch (size) { \ case 4: \ __asm__ __volatile__( \ - "am"#asm_op".w" " %[ret], %[val], %[ptr] \n" \ + "am"#asm_op".w" " %[ret], %[val], %[ptr] \n" \ : [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr) \ : [val] "r" (val)); \ break; \ case 8: \ __asm__ __volatile__( \ - "am"#asm_op".d" " %[ret], %[val], %[ptr] \n" \ + "am"#asm_op".d" " %[ret], %[val], %[ptr] \n" \ : [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr) \ : [val] "r" (val)); \ break; \ @@ -63,7 +63,7 @@ PERCPU_OP(and, and, &) PERCPU_OP(or, or, |) #undef PERCPU_OP -static __always_inline unsigned long __percpu_read(void *ptr, int size) +static __always_inline unsigned long __percpu_read(void __percpu *ptr, int size) { unsigned long ret; @@ -100,7 +100,7 @@ static __always_inline unsigned long __percpu_read(void *ptr, int size) return ret; } -static __always_inline void __percpu_write(void *ptr, unsigned long val, int size) +static __always_inline void __percpu_write(void __percpu *ptr, unsigned long val, int size) { switch (size) { case 1: @@ -132,8 +132,7 @@ static __always_inline void __percpu_write(void *ptr, unsigned long val, int siz } } -static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, - int size) +static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size) { switch (size) { case 1: From 902d75cdf0cf0a3fb58550089ee519abf12566f5 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Tue, 21 Nov 2023 15:03:25 +0800 Subject: [PATCH 264/299] LoongArch: Silence the boot warning about 'nokaslr' The kernel parameter 'nokaslr' is handled before start_kernel(), so we don't need early_param() to mark it technically. But it can cause a boot warning as follows: Unknown kernel command line parameters "nokaslr", will be passed to user space. When we use 'init=/bin/bash', 'nokaslr' which passed to user space will even cause a kernel panic. So we use early_param() to mark 'nokaslr', simply print a notice and silence the boot warning (also fix a potential panic). This logic is similar to RISC-V. 
Signed-off-by: Huacai Chen --- arch/loongarch/kernel/relocate.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c index 288b739ca88d..1acfa704c8d0 100644 --- a/arch/loongarch/kernel/relocate.c +++ b/arch/loongarch/kernel/relocate.c @@ -102,6 +102,14 @@ static inline __init unsigned long get_random_boot(void) return hash; } +static int __init nokaslr(char *p) +{ + pr_info("KASLR is disabled.\n"); + + return 0; /* Print a notice and silence the boot warning */ +} +early_param("nokaslr", nokaslr); + static inline __init bool kaslr_disabled(void) { char *str; From 19d86a496233731882aea7ec24505ce6641b1c0c Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Tue, 21 Nov 2023 15:03:25 +0800 Subject: [PATCH 265/299] LoongArch: Mark {dmw,tlb}_virt_to_page() exports as non-GPL Mark {dmw,tlb}_virt_to_page() exports as non-GPL, in order to let out-of-tree modules (e.g. OpenZFS) be built without errors. Otherwise we get: ERROR: modpost: GPL-incompatible module zfs.ko uses GPL-only symbol 'dmw_virt_to_page' ERROR: modpost: GPL-incompatible module zfs.ko uses GPL-only symbol 'tlb_virt_to_page' Reported-by: Haowu Ge Signed-off-by: Huacai Chen --- arch/loongarch/mm/pgtable.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c index 71d0539e2d0b..2aae72e63871 100644 --- a/arch/loongarch/mm/pgtable.c +++ b/arch/loongarch/mm/pgtable.c @@ -13,13 +13,13 @@ struct page *dmw_virt_to_page(unsigned long kaddr) { return pfn_to_page(virt_to_pfn(kaddr)); } -EXPORT_SYMBOL_GPL(dmw_virt_to_page); +EXPORT_SYMBOL(dmw_virt_to_page); struct page *tlb_virt_to_page(unsigned long kaddr) { return pfn_to_page(pte_pfn(*virt_to_kpte(kaddr))); } -EXPORT_SYMBOL_GPL(tlb_virt_to_page); +EXPORT_SYMBOL(tlb_virt_to_page); pgd_t *pgd_alloc(struct mm_struct *mm) { From d43f37b73468c172bc89ac4824a1511b411f0778 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 21 Nov 2023 15:03:25 +0800 Subject: [PATCH 266/299] LoongArch: Implement constant timer shutdown interface When a cpu is hot-unplugged, it is put in idle state and the function arch_cpu_idle_dead() is called. The timer interrupt for this processor should be disabled, otherwise there will be pending timer interrupt for the unplugged cpu, so that vcpu is prevented from giving up scheduling when system is running in vm mode. This patch implements the timer shutdown interface so that the constant timer will be properly disabled when a CPU is hot-unplugged. 
Reviewed-by: WANG Xuerui Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen --- arch/loongarch/kernel/time.c | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index 3064af94db9c..e7015f7b70e3 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -58,21 +58,6 @@ static int constant_set_state_oneshot(struct clock_event_device *evt) return 0; } -static int constant_set_state_oneshot_stopped(struct clock_event_device *evt) -{ - unsigned long timer_config; - - raw_spin_lock(&state_lock); - - timer_config = csr_read64(LOONGARCH_CSR_TCFG); - timer_config &= ~CSR_TCFG_EN; - csr_write64(timer_config, LOONGARCH_CSR_TCFG); - - raw_spin_unlock(&state_lock); - - return 0; -} - static int constant_set_state_periodic(struct clock_event_device *evt) { unsigned long period; @@ -92,6 +77,16 @@ static int constant_set_state_periodic(struct clock_event_device *evt) static int constant_set_state_shutdown(struct clock_event_device *evt) { + unsigned long timer_config; + + raw_spin_lock(&state_lock); + + timer_config = csr_read64(LOONGARCH_CSR_TCFG); + timer_config &= ~CSR_TCFG_EN; + csr_write64(timer_config, LOONGARCH_CSR_TCFG); + + raw_spin_unlock(&state_lock); + return 0; } @@ -161,7 +156,7 @@ int constant_clockevent_init(void) cd->rating = 320; cd->cpumask = cpumask_of(cpu); cd->set_state_oneshot = constant_set_state_oneshot; - cd->set_state_oneshot_stopped = constant_set_state_oneshot_stopped; + cd->set_state_oneshot_stopped = constant_set_state_shutdown; cd->set_state_periodic = constant_set_state_periodic; cd->set_state_shutdown = constant_set_state_shutdown; cd->set_next_event = constant_timer_next_event; From 10301780c96e70b694eca86a3ccfa1893a4a6d3e Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Tue, 21 Nov 2023 15:03:25 +0800 Subject: [PATCH 267/299] Docs/LoongArch: Update links in LoongArch introduction.rst LoongArch-Vol1 has been updated to v1.10, the links in the documentation are out of date, let's update it. Signed-off-by: Yanteng Si Signed-off-by: Huacai Chen --- Documentation/arch/loongarch/introduction.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/arch/loongarch/introduction.rst b/Documentation/arch/loongarch/introduction.rst index 8c568cfc2107..5e6db78abeaf 100644 --- a/Documentation/arch/loongarch/introduction.rst +++ b/Documentation/arch/loongarch/introduction.rst @@ -375,9 +375,9 @@ Developer web site of Loongson and LoongArch (Software and Documentation): Documentation of LoongArch ISA: - https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.02-CN.pdf (in Chinese) + https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.10-CN.pdf (in Chinese) - https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.02-EN.pdf (in English) + https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.10-EN.pdf (in English) Documentation of LoongArch ELF psABI: From c517fd2738f472eb0d1db60a70d91629349a9bf8 Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Tue, 21 Nov 2023 15:03:26 +0800 Subject: [PATCH 268/299] Docs/zh_CN/LoongArch: Update links in LoongArch introduction.rst LoongArch-Vol1 has been updated to v1.10, the links in the documentation are out of date, let's update it. 
Signed-off-by: Yanteng Si Signed-off-by: Huacai Chen --- .../translations/zh_CN/arch/loongarch/introduction.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/translations/zh_CN/arch/loongarch/introduction.rst b/Documentation/translations/zh_CN/arch/loongarch/introduction.rst index 59d6bf33050c..bf463c5a4c51 100644 --- a/Documentation/translations/zh_CN/arch/loongarch/introduction.rst +++ b/Documentation/translations/zh_CN/arch/loongarch/introduction.rst @@ -338,9 +338,9 @@ Loongson与LoongArch的开发者网站(软件与文档资源): LoongArch指令集架构的文档: - https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.02-CN.pdf (中文版) + https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.10-CN.pdf (中文版) - https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.02-EN.pdf (英文版) + https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.10-EN.pdf (英文版) LoongArch的ELF psABI文档: From 8d9ce3e53bbd2d6241213e9a4deb310499c929ff Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 20 Nov 2023 16:45:45 +0100 Subject: [PATCH 269/299] MAINTAINERS: Drop Mark Gross as maintainer for x86 platform drivers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mark has not really been active as maintainer for x86 platform drivers lately, drop Mark from the MAINTAINERS entries for drivers/platform/x86, drivers/platform/mellanox and drivers/platform/surface. Cc: Mark Gross Signed-off-by: Hans de Goede Acked-by: Andy Shevchenko Link: https://lore.kernel.org/r/20231120154548.611041-1-hdegoede@redhat.com Signed-off-by: Ilpo Järvinen --- MAINTAINERS | 3 --- 1 file changed, 3 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 0666ddb65552..e972ce5f54e7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13778,7 +13778,6 @@ F: drivers/net/ethernet/mellanox/mlxfw/ MELLANOX HARDWARE PLATFORM SUPPORT M: Hans de Goede M: Ilpo Järvinen -M: Mark Gross M: Vadim Pasternak L: platform-driver-x86@vger.kernel.org S: Supported @@ -14387,7 +14386,6 @@ F: drivers/platform/surface/surface_gpe.c MICROSOFT SURFACE HARDWARE PLATFORM SUPPORT M: Hans de Goede M: Ilpo Järvinen -M: Mark Gross M: Maximilian Luz L: platform-driver-x86@vger.kernel.org S: Maintained @@ -23653,7 +23651,6 @@ F: drivers/platform/x86/x86-android-tablets/ X86 PLATFORM DRIVERS M: Hans de Goede M: Ilpo Järvinen -M: Mark Gross L: platform-driver-x86@vger.kernel.org S: Maintained Q: https://patchwork.kernel.org/project/platform-driver-x86/list/ From a6584711e64d9d12ab79a450ec3628fd35e4f476 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 20 Nov 2023 17:07:56 +0200 Subject: [PATCH 270/299] platform/x86: intel_telemetry: Fix kernel doc descriptions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit LKP found issues with a kernel doc in the driver: core.c:116: warning: Function parameter or member 'ioss_evtconfig' not described in 'telemetry_update_events' core.c:188: warning: Function parameter or member 'ioss_evtconfig' not described in 'telemetry_get_eventconfig' It looks like it were copy'n'paste typos when these descriptions had been introduced. Fix the typos. 
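For context, a hedged sketch of the kernel-doc rule the fix restores, using a made-up prototype rather than the driver's real one: each "@name:" line must match an actual parameter name, otherwise scripts/kernel-doc emits the "Function parameter or member ... not described" warning quoted above.

    /**
     * demo_update_events() - Update a demo telemetry configuration
     * @pss_evtconfig: PSS related config. No change if num_evts = 0.
     * @ioss_evtconfig: IOSS related config. No change if num_evts = 0.
     *
     * Return: 0 on success, negative error code otherwise.
     */
    int demo_update_events(struct demo_evtconfig pss_evtconfig,
                           struct demo_evtconfig ioss_evtconfig);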
Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202310070743.WALmRGSY-lkp@intel.com/ Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20231120150756.1661425-1-andriy.shevchenko@linux.intel.com Reviewed-by: Rajneesh Bhardwaj Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen --- drivers/platform/x86/intel/telemetry/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/platform/x86/intel/telemetry/core.c b/drivers/platform/x86/intel/telemetry/core.c index fdf55b5d6948..e4be40f73eeb 100644 --- a/drivers/platform/x86/intel/telemetry/core.c +++ b/drivers/platform/x86/intel/telemetry/core.c @@ -102,7 +102,7 @@ static const struct telemetry_core_ops telm_defpltops = { /** * telemetry_update_events() - Update telemetry Configuration * @pss_evtconfig: PSS related config. No change if num_evts = 0. - * @pss_evtconfig: IOSS related config. No change if num_evts = 0. + * @ioss_evtconfig: IOSS related config. No change if num_evts = 0. * * This API updates the IOSS & PSS Telemetry configuration. Old config * is overwritten. Call telemetry_reset_events when logging is over @@ -176,7 +176,7 @@ EXPORT_SYMBOL_GPL(telemetry_reset_events); /** * telemetry_get_eventconfig() - Returns the pss and ioss events enabled * @pss_evtconfig: Pointer to PSS related configuration. - * @pss_evtconfig: Pointer to IOSS related configuration. + * @ioss_evtconfig: Pointer to IOSS related configuration. * @pss_len: Number of u32 elements allocated for pss_evtconfig array * @ioss_len: Number of u32 elements allocated for ioss_evtconfig array * From c0e2926266af3b5acf28df0a8fc6e4d90effe0bb Mon Sep 17 00:00:00 2001 From: Kunwu Chan Date: Sun, 19 Nov 2023 22:17:59 +0800 Subject: [PATCH 271/299] ipv4: Correct/silence an endian warning in __ip_do_redirect net/ipv4/route.c:783:46: warning: incorrect type in argument 2 (different base types) net/ipv4/route.c:783:46: expected unsigned int [usertype] key net/ipv4/route.c:783:46: got restricted __be32 [usertype] new_gw Fixes: 969447f226b4 ("ipv4: use new_gw for redirect neigh lookup") Suggested-by: Eric Dumazet Signed-off-by: Kunwu Chan Link: https://lore.kernel.org/r/20231119141759.420477-1-chentao@kylinos.cn Signed-off-by: Paolo Abeni --- net/ipv4/route.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 3290a4442b4a..16615d107cf0 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -780,7 +780,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow goto reject_redirect; } - n = __ipv4_neigh_lookup(rt->dst.dev, new_gw); + n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw); if (!n) n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev); if (!IS_ERR(n)) { From d30fb712e52964f2cf9a9c14cf67078394044837 Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Sun, 19 Nov 2023 08:23:41 -0800 Subject: [PATCH 272/299] hv_netvsc: fix race of netvsc and VF register_netdevice The rtnl lock also needs to be held before rndis_filter_device_add() which advertises nvsp_2_vsc_capability / sriov bit, and triggers VF NIC offering and registering. If VF NIC finished register_netdev() earlier it may cause name based config failure. To fix this issue, move the call to rtnl_lock() before rndis_filter_device_add(), so VF will be registered later than netvsc / synthetic NIC, and gets a name numbered (ethX) after netvsc. 
Cc: stable@vger.kernel.org Fixes: e04e7a7bbd4b ("hv_netvsc: Fix a deadlock by getting rtnl lock earlier in netvsc_probe()") Reported-by: Dexuan Cui Signed-off-by: Haiyang Zhang Reviewed-by: Wojciech Drewek Reviewed-by: Simon Horman Reviewed-by: Dexuan Cui Signed-off-by: Paolo Abeni --- drivers/net/hyperv/netvsc_drv.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 3ba3c8fb28a5..5e528a76f5f5 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2531,6 +2531,21 @@ static int netvsc_probe(struct hv_device *dev, goto devinfo_failed; } + /* We must get rtnl lock before scheduling nvdev->subchan_work, + * otherwise netvsc_subchan_work() can get rtnl lock first and wait + * all subchannels to show up, but that may not happen because + * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() + * -> ... -> device_add() -> ... -> __device_attach() can't get + * the device lock, so all the subchannels can't be processed -- + * finally netvsc_subchan_work() hangs forever. + * + * The rtnl lock also needs to be held before rndis_filter_device_add() + * which advertises nvsp_2_vsc_capability / sriov bit, and triggers + * VF NIC offering and registering. If VF NIC finished register_netdev() + * earlier it may cause name based config failure. + */ + rtnl_lock(); + nvdev = rndis_filter_device_add(dev, device_info); if (IS_ERR(nvdev)) { ret = PTR_ERR(nvdev); @@ -2540,16 +2555,6 @@ static int netvsc_probe(struct hv_device *dev, eth_hw_addr_set(net, device_info->mac_adr); - /* We must get rtnl lock before scheduling nvdev->subchan_work, - * otherwise netvsc_subchan_work() can get rtnl lock first and wait - * all subchannels to show up, but that may not happen because - * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() - * -> ... -> device_add() -> ... -> __device_attach() can't get - * the device lock, so all the subchannels can't be processed -- - * finally netvsc_subchan_work() hangs forever. - */ - rtnl_lock(); - if (nvdev->num_chn > 1) schedule_work(&nvdev->subchan_work); @@ -2586,9 +2591,9 @@ static int netvsc_probe(struct hv_device *dev, return 0; register_failed: - rtnl_unlock(); rndis_filter_device_remove(dev, nvdev); rndis_failed: + rtnl_unlock(); netvsc_devinfo_put(device_info); devinfo_failed: free_percpu(net_device_ctx->vf_stats); From 85520856466ed6bc3b1ccb013cddac70ceb437db Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Sun, 19 Nov 2023 08:23:42 -0800 Subject: [PATCH 273/299] hv_netvsc: Fix race of register_netdevice_notifier and VF register If VF NIC is registered earlier, NETDEV_REGISTER event is replayed, but NETDEV_POST_INIT is not. Move register_netdevice_notifier() earlier, so the call back function is set before probing. 
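To illustrate the asymmetry described above (the helper names below are hypothetical, not part of this patch): register_netdevice_notifier() replays NETDEV_REGISTER (and NETDEV_UP) for devices that already exist, but NETDEV_POST_INIT is only emitted while a device is first being registered, so a handler that depends on it must already be installed before the VF can probe.

    static int demo_netdev_event(struct notifier_block *nb,
                                 unsigned long event, void *ptr)
    {
            struct net_device *dev = netdev_notifier_info_to_dev(ptr);

            switch (event) {
            case NETDEV_POST_INIT:
                    /* not replayed: missed if the notifier came late */
                    demo_prepare(dev);
                    break;
            case NETDEV_REGISTER:
                    /* replayed for already-registered devices */
                    demo_attach(dev);
                    break;
            }
            return NOTIFY_DONE;
    }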
Cc: stable@vger.kernel.org Fixes: e04e7a7bbd4b ("hv_netvsc: Fix a deadlock by getting rtnl lock earlier in netvsc_probe()") Reported-by: Dexuan Cui Signed-off-by: Haiyang Zhang Reviewed-by: Wojciech Drewek Reviewed-by: Dexuan Cui Signed-off-by: Paolo Abeni --- drivers/net/hyperv/netvsc_drv.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 5e528a76f5f5..b7dfd51f09e6 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2793,12 +2793,17 @@ static int __init netvsc_drv_init(void) } netvsc_ring_bytes = ring_size * PAGE_SIZE; + register_netdevice_notifier(&netvsc_netdev_notifier); + ret = vmbus_driver_register(&netvsc_drv); if (ret) - return ret; + goto err_vmbus_reg; - register_netdevice_notifier(&netvsc_netdev_notifier); return 0; + +err_vmbus_reg: + unregister_netdevice_notifier(&netvsc_netdev_notifier); + return ret; } MODULE_LICENSE("GPL"); From c807d6cd089d2f4951baa838081ec5ae3e2360f8 Mon Sep 17 00:00:00 2001 From: Long Li Date: Sun, 19 Nov 2023 08:23:43 -0800 Subject: [PATCH 274/299] hv_netvsc: Mark VF as slave before exposing it to user-mode When a VF is being exposed form the kernel, it should be marked as "slave" before exposing to the user-mode. The VF is not usable without netvsc running as master. The user-mode should never see a VF without the "slave" flag. This commit moves the code of setting the slave flag to the time before VF is exposed to user-mode. Cc: stable@vger.kernel.org Fixes: 0c195567a8f6 ("netvsc: transparent VF management") Signed-off-by: Long Li Signed-off-by: Haiyang Zhang Acked-by: Stephen Hemminger Acked-by: Dexuan Cui Signed-off-by: Paolo Abeni --- drivers/net/hyperv/netvsc_drv.c | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index b7dfd51f09e6..706ea5263e87 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2206,9 +2206,6 @@ static int netvsc_vf_join(struct net_device *vf_netdev, goto upper_link_failed; } - /* set slave flag before open to prevent IPv6 addrconf */ - vf_netdev->flags |= IFF_SLAVE; - schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); @@ -2315,16 +2312,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) } - /* Fallback path to check synthetic vf with - * help of mac addr + /* Fallback path to check synthetic vf with help of mac addr. + * Because this function can be called before vf_netdev is + * initialized (NETDEV_POST_INIT) when its perm_addr has not been copied + * from dev_addr, also try to match to its dev_addr. + * Note: On Hyper-V and Azure, it's not possible to set a MAC address + * on a VF that matches to the MAC of a unrelated NETVSC device. 
*/ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { ndev = hv_get_drvdata(ndev_ctx->device_ctx); - if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) { - netdev_notice(vf_netdev, - "falling back to mac addr based matching\n"); + if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) || + ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr)) return ndev; - } } netdev_notice(vf_netdev, @@ -2332,6 +2331,19 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) return NULL; } +static int netvsc_prepare_bonding(struct net_device *vf_netdev) +{ + struct net_device *ndev; + + ndev = get_netvsc_byslot(vf_netdev); + if (!ndev) + return NOTIFY_DONE; + + /* set slave flag before open to prevent IPv6 addrconf */ + vf_netdev->flags |= IFF_SLAVE; + return NOTIFY_DONE; +} + static int netvsc_register_vf(struct net_device *vf_netdev) { struct net_device_context *net_device_ctx; @@ -2758,6 +2770,8 @@ static int netvsc_netdev_event(struct notifier_block *this, return NOTIFY_DONE; switch (event) { + case NETDEV_POST_INIT: + return netvsc_prepare_bonding(event_dev); case NETDEV_REGISTER: return netvsc_register_vf(event_dev); case NETDEV_UNREGISTER: From 9c6dc13106f2dd2d6819d66618b25a6f41f0ee6a Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Mon, 20 Nov 2023 08:28:40 +0000 Subject: [PATCH 275/299] MAINTAINERS: Add indirect_call_wrapper.h to NETWORKING [GENERAL] indirect_call_wrapper.h is not, strictly speaking, networking specific. However, it's git history indicates that in practice changes go through netdev and thus the netdev maintainers have effectively been taking responsibility for it. Formalise this by adding it to the NETWORKING [GENERAL] section in the MAINTAINERS file. It is not clear how many other files under include/linux fall into this category and it would be interesting, as a follow-up, to audit that and propose further updates to the MAINTAINERS file as appropriate. Link: https://lore.kernel.org/netdev/20231116010310.4664dd38@kernel.org/ Signed-off-by: Simon Horman Link: https://lore.kernel.org/r/20231120-indirect_call_wrapper-maintainer-v1-1-0a6bb1f7363e@kernel.org Signed-off-by: Paolo Abeni --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 80a95878c61d..6cc0708f77b4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15057,6 +15057,7 @@ F: Documentation/networking/ F: Documentation/process/maintainer-netdev.rst F: Documentation/userspace-api/netlink/ F: include/linux/in.h +F: include/linux/indirect_call_wrapper.h F: include/linux/net.h F: include/linux/netdevice.h F: include/net/ From 0739af07d1d947af27c877f797cb82ceee702515 Mon Sep 17 00:00:00 2001 From: Jose Ignacio Tornos Martinez Date: Mon, 20 Nov 2023 13:06:29 +0100 Subject: [PATCH 276/299] net: usb: ax88179_178a: fix failed operations during ax88179_reset Using generic ASIX Electronics Corp. AX88179 Gigabit Ethernet device, the following test cycle has been implemented: - power on - check logs - shutdown - after detecting the system shutdown, disconnect power - after approximately 60 seconds of sleep, power is restored Running some cycles, sometimes error logs like this appear: kernel: ax88179_178a 2-9:1.0 (unnamed net_device) (uninitialized): Failed to write reg index 0x0001: -19 kernel: ax88179_178a 2-9:1.0 (unnamed net_device) (uninitialized): Failed to read reg index 0x0001: -19 ... These failed operation are happening during ax88179_reset execution, so the initialization could not be correct. 
In order to avoid this, we need to increase the delay after reset and clock initial operations. By using these larger values, many cycles have been run and no failed operations appear. It would be better to check some status register to verify when the operation has finished, but I do not have found any available information (neither in the public datasheets nor in the manufacturer's driver). The only available information for the necessary delays is the maufacturer's driver (original values) but the proposed values are not enough for the tested devices. Fixes: e2ca90c276e1f ("ax88179_178a: ASIX AX88179_178A USB 3.0/2.0 to gigabit ethernet adapter driver") Reported-by: Herb Wei Tested-by: Herb Wei Signed-off-by: Jose Ignacio Tornos Martinez Link: https://lore.kernel.org/r/20231120120642.54334-1-jtornosm@redhat.com Signed-off-by: Jakub Kicinski --- drivers/net/usb/ax88179_178a.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index aff39bf3161d..4ea0e155bb0d 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1583,11 +1583,11 @@ static int ax88179_reset(struct usbnet *dev) *tmp16 = AX_PHYPWR_RSTCTL_IPRL; ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16); - msleep(200); + msleep(500); *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS; ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp); - msleep(100); + msleep(200); /* Ethernet PHY Auto Detach*/ ax88179_auto_detach(dev); From 495ec91b48e489afefb2ad714f0d9b68c3016c6c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 20 Nov 2023 12:01:09 -0800 Subject: [PATCH 277/299] docs: netdev: try to guide people on dealing with silence There has been more than a few threads which went idle before the merge window and now people came back to them and started asking about next steps. We currently tell people to be patient and not to repost too often. Our "not too often", however, is still a few orders of magnitude faster than other subsystems. Or so I feel after hearing people talk about review rates at LPC. Clarify in the doc that if the discussion went idle for a week on netdev, 95% of the time there's no point waiting longer. Link: https://lore.kernel.org/r/20231120200109.620392-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- Documentation/process/maintainer-netdev.rst | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst index 7feacc20835e..84ee60fceef2 100644 --- a/Documentation/process/maintainer-netdev.rst +++ b/Documentation/process/maintainer-netdev.rst @@ -193,9 +193,23 @@ Review timelines Generally speaking, the patches get triaged quickly (in less than 48h). But be patient, if your patch is active in patchwork (i.e. it's listed on the project's patch list) the chances it was missed are close to zero. -Asking the maintainer for status updates on your -patch is a good way to ensure your patch is ignored or pushed to the -bottom of the priority list. + +The high volume of development on netdev makes reviewers move on +from discussions relatively quickly. New comments and replies +are very unlikely to arrive after a week of silence. If a patch +is no longer active in patchwork and the thread went idle for more +than a week - clarify the next steps and/or post the next version. 
+ +For RFC postings specifically, if nobody responded in a week - reviewers +either missed the posting or have no strong opinions. If the code is ready, +repost as a PATCH. + +Emails saying just "ping" or "bump" are considered rude. If you can't figure +out the status of the patch from patchwork or where the discussion has +landed - describe your best guess and ask if it's correct. For example:: + + I don't understand what the next steps are. Person X seems to be unhappy + with A, should I do B and repost the patches? .. _Changes requested: From b6fe6f03716da246b453369f98a553d4ab21447c Mon Sep 17 00:00:00 2001 From: Hao Ge Date: Tue, 21 Nov 2023 09:37:09 +0800 Subject: [PATCH 278/299] dpll: Fix potential msg memleak when genlmsg_put_reply failed We should clean the skb resource if genlmsg_put_reply failed. Fixes: 9d71b54b65b1 ("dpll: netlink: Add DPLL framework base functions") Signed-off-by: Hao Ge Reviewed-by: Vadim Fedorenko Link: https://lore.kernel.org/r/20231121013709.73323-1-gehao@kylinos.cn Signed-off-by: Jakub Kicinski --- drivers/dpll/dpll_netlink.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c index a6dc3997bf5c..442a0ebeb953 100644 --- a/drivers/dpll/dpll_netlink.c +++ b/drivers/dpll/dpll_netlink.c @@ -1093,9 +1093,10 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info) return -ENOMEM; hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0, DPLL_CMD_PIN_ID_GET); - if (!hdr) + if (!hdr) { + nlmsg_free(msg); return -EMSGSIZE; - + } pin = dpll_pin_find_from_nlattr(info); if (!IS_ERR(pin)) { ret = dpll_msg_add_pin_handle(msg, pin); @@ -1123,8 +1124,10 @@ int dpll_nl_pin_get_doit(struct sk_buff *skb, struct genl_info *info) return -ENOMEM; hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0, DPLL_CMD_PIN_GET); - if (!hdr) + if (!hdr) { + nlmsg_free(msg); return -EMSGSIZE; + } ret = dpll_cmd_pin_get_one(msg, pin, info->extack); if (ret) { nlmsg_free(msg); @@ -1256,8 +1259,10 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info) return -ENOMEM; hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0, DPLL_CMD_DEVICE_ID_GET); - if (!hdr) + if (!hdr) { + nlmsg_free(msg); return -EMSGSIZE; + } dpll = dpll_device_find_from_nlattr(info); if (!IS_ERR(dpll)) { @@ -1284,8 +1289,10 @@ int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info) return -ENOMEM; hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0, DPLL_CMD_DEVICE_GET); - if (!hdr) + if (!hdr) { + nlmsg_free(msg); return -EMSGSIZE; + } ret = dpll_device_get_one(dpll, msg, info->extack); if (ret) { From 18286883e779fb79b413a7462968ee3f6768f19c Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Tue, 14 Nov 2023 17:59:28 +0100 Subject: [PATCH 279/299] x86/hyperv: Use atomic_try_cmpxchg() to micro-optimize hv_nmi_unknown() Use atomic_try_cmpxchg() instead of atomic_cmpxchg(*ptr, old, new) == old in hv_nmi_unknown(). On x86 the CMPXCHG instruction returns success in the ZF flag, so this change saves a compare after CMPXCHG. The generated asm code improves from: 3e: 65 8b 15 00 00 00 00 mov %gs:0x0(%rip),%edx 45: b8 ff ff ff ff mov $0xffffffff,%eax 4a: f0 0f b1 15 00 00 00 lock cmpxchg %edx,0x0(%rip) 51: 00 52: 83 f8 ff cmp $0xffffffff,%eax 55: 0f 95 c0 setne %al to: 3e: 65 8b 15 00 00 00 00 mov %gs:0x0(%rip),%edx 45: b8 ff ff ff ff mov $0xffffffff,%eax 4a: f0 0f b1 15 00 00 00 lock cmpxchg %edx,0x0(%rip) 51: 00 52: 0f 95 c0 setne %al No functional change intended. Cc: "K. Y. 
Srinivasan" Cc: Haiyang Zhang Cc: Wei Liu Cc: Dexuan Cui Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. Peter Anvin" Signed-off-by: Uros Bizjak Reviewed-by: Michael Kelley Link: https://lore.kernel.org/r/20231114170038.381634-1-ubizjak@gmail.com Signed-off-by: Wei Liu Message-ID: <20231114170038.381634-1-ubizjak@gmail.com> --- arch/x86/kernel/cpu/mshyperv.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index e6bba12c759c..01fa06dd06b6 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -262,11 +262,14 @@ static uint32_t __init ms_hyperv_platform(void) static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs) { static atomic_t nmi_cpu = ATOMIC_INIT(-1); + unsigned int old_cpu, this_cpu; if (!unknown_nmi_panic) return NMI_DONE; - if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1) + old_cpu = -1; + this_cpu = raw_smp_processor_id(); + if (!atomic_try_cmpxchg(&nmi_cpu, &old_cpu, this_cpu)) return NMI_HANDLED; return NMI_DONE; From 84d2db91f14a32dc856a5972e3f0907089093c7a Mon Sep 17 00:00:00 2001 From: Nguyen Dinh Phi Date: Tue, 21 Nov 2023 15:53:57 +0800 Subject: [PATCH 280/299] nfc: virtual_ncidev: Add variable to check if ndev is running syzbot reported an memory leak that happens when an skb is add to send_buff after virtual nci closed. This patch adds a variable to track if the ndev is running before handling new skb in send function. Signed-off-by: Nguyen Dinh Phi Reported-by: syzbot+6eb09d75211863f15e3e@syzkaller.appspotmail.com Closes: https://lore.kernel.org/lkml/00000000000075472b06007df4fb@google.com Reviewed-by: Bongsu Jeon Reviewed-by: Krzysztof Kozlowski Signed-off-by: David S. Miller --- drivers/nfc/virtual_ncidev.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c index b027be0b0b6f..590b038e449e 100644 --- a/drivers/nfc/virtual_ncidev.c +++ b/drivers/nfc/virtual_ncidev.c @@ -26,10 +26,14 @@ struct virtual_nci_dev { struct mutex mtx; struct sk_buff *send_buff; struct wait_queue_head wq; + bool running; }; static int virtual_nci_open(struct nci_dev *ndev) { + struct virtual_nci_dev *vdev = nci_get_drvdata(ndev); + + vdev->running = true; return 0; } @@ -40,6 +44,7 @@ static int virtual_nci_close(struct nci_dev *ndev) mutex_lock(&vdev->mtx); kfree_skb(vdev->send_buff); vdev->send_buff = NULL; + vdev->running = false; mutex_unlock(&vdev->mtx); return 0; @@ -50,7 +55,7 @@ static int virtual_nci_send(struct nci_dev *ndev, struct sk_buff *skb) struct virtual_nci_dev *vdev = nci_get_drvdata(ndev); mutex_lock(&vdev->mtx); - if (vdev->send_buff) { + if (vdev->send_buff || !vdev->running) { mutex_unlock(&vdev->mtx); kfree_skb(skb); return -1; From e6d71b437abc2f249e3b6a1ae1a7228e09c6e563 Mon Sep 17 00:00:00 2001 From: "D. Wythe" Date: Wed, 22 Nov 2023 10:37:05 +0800 Subject: [PATCH 281/299] net/smc: avoid data corruption caused by decline MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We found a data corruption issue during testing of SMC-R on Redis applications. The benchmark has a low probability of reporting a strange error as shown below. 
"Error: Protocol error, got "\xe2" as reply type byte" Finally, we found that the retrieved error data was as follows: 0xE2 0xD4 0xC3 0xD9 0x04 0x00 0x2C 0x20 0xA6 0x56 0x00 0x16 0x3E 0x0C 0xCB 0x04 0x02 0x01 0x00 0x00 0x20 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0xE2 It is quite obvious that this is a SMC DECLINE message, which means that the applications received SMC protocol message. We found that this was caused by the following situations: client server ¦ clc proposal -------------> ¦ clc accept <------------- ¦ clc confirm -------------> wait llc confirm send llc confirm ¦failed llc confirm ¦ x------ (after 2s)timeout wait llc confirm rsp wait decline (after 1s) timeout (after 2s) timeout ¦ decline --------------> ¦ decline <-------------- As a result, a decline message was sent in the implementation, and this message was read from TCP by the already-fallback connection. This patch double the client timeout as 2x of the server value, With this simple change, the Decline messages should never cross or collide (during Confirm link timeout). This issue requires an immediate solution, since the protocol updates involve a more long-term solution. Fixes: 0fb0b02bd6fd ("net/smc: adapt SMC client code to use the LLC flow") Signed-off-by: D. Wythe Reviewed-by: Wen Gu Reviewed-by: Wenjia Zhang Signed-off-by: David S. Miller --- net/smc/af_smc.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index da97f946b79b..2a1388841951 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -598,8 +598,12 @@ static int smcr_clnt_conf_first_link(struct smc_sock *smc) struct smc_llc_qentry *qentry; int rc; - /* receive CONFIRM LINK request from server over RoCE fabric */ - qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME, + /* Receive CONFIRM LINK request from server over RoCE fabric. + * Increasing the client's timeout by twice as much as the server's + * timeout by default can temporarily avoid decline messages of + * both sides crossing or colliding + */ + qentry = smc_llc_wait(link->lgr, NULL, 2 * SMC_LLC_WAIT_TIME, SMC_LLC_CONFIRM_LINK); if (!qentry) { struct smc_clc_msg_decline dclc; From 6a26310273c323380da21eb23fcfd50e31140913 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 21 Nov 2023 09:09:33 +0100 Subject: [PATCH 282/299] Revert "net: r8169: Disable multicast filter for RTL8168H and RTL8107E" This reverts commit efa5f1311c4998e9e6317c52bc5ee93b3a0f36df. I couldn't reproduce the reported issue. What I did, based on a pcap packet log provided by the reporter: - Used same chip version (RTL8168h) - Set MAC address to the one used on the reporters system - Replayed the EAPOL unicast packet that, according to the reporter, was filtered out by the mc filter. The packet was properly received. Therefore the root cause of the reported issue seems to be somewhere else. Disabling mc filtering completely for the most common chip version is a quite big hammer. Therefore revert the change and wait for further analysis results from the reporter. Cc: stable@vger.kernel.org Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169_main.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index b9bb1d2f0237..295366a85c63 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -2599,9 +2599,7 @@ static void rtl_set_rx_mode(struct net_device *dev) rx_mode &= ~AcceptMulticast; } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || dev->flags & IFF_ALLMULTI || - tp->mac_version == RTL_GIGA_MAC_VER_35 || - tp->mac_version == RTL_GIGA_MAC_VER_46 || - tp->mac_version == RTL_GIGA_MAC_VER_48) { + tp->mac_version == RTL_GIGA_MAC_VER_35) { /* accept all multicasts */ } else if (netdev_mc_empty(dev)) { rx_mode &= ~AcceptMulticast; From 125b0bb95dd6bec81b806b997a4ccb026eeecf8f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 9 Nov 2023 22:22:13 -0800 Subject: [PATCH 283/299] asm-generic: qspinlock: fix queued_spin_value_unlocked() implementation We really don't want to do atomic_read() or anything like that, since we already have the value, not the lock. The whole point of this is that we've loaded the lock from memory, and we want to check whether the value we loaded was a locked one or not. The main use of this is the lockref code, which loads both the lock and the reference count in one atomic operation, and then works on that combined value. With the atomic_read(), the compiler would pointlessly spill the value to the stack, in order to then be able to read it back "atomically". This is the qspinlock version of commit c6f4a9002252 ("asm-generic: ticket-lock: Optimize arch_spin_value_unlocked()") which fixed this same bug for ticket locks. Cc: Guo Ren Cc: Ingo Molnar Cc: Waiman Long Link: https://lore.kernel.org/all/CAHk-=whNRv0v6kQiV5QO6DJhjH4KEL36vWQ6Re8Csrnh4zbRkQ@mail.gmail.com/ Signed-off-by: Linus Torvalds --- include/asm-generic/qspinlock.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 995513fa2690..0655aa5b57b2 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -70,7 +70,7 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock) */ static __always_inline int queued_spin_value_unlocked(struct qspinlock lock) { - return !atomic_read(&lock.val); + return !lock.val.counter; } /** From 99360d9620f09fb8bc15548d855011bbb198c680 Mon Sep 17 00:00:00 2001 From: Lech Perczak Date: Sat, 18 Nov 2023 00:19:18 +0100 Subject: [PATCH 284/299] net: usb: qmi_wwan: claim interface 4 for ZTE MF290 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Interface 4 is used by for QMI interface in stock firmware of MF28D, the router which uses MF290 modem. Rebind it to qmi_wwan after freeing it up from option driver. The proper configuration is: Interface mapping is: 0: QCDM, 1: (unknown), 2: AT (PCUI), 2: AT (Modem), 4: QMI T: Bus=01 Lev=02 Prnt=02 Port=00 Cnt=01 Dev#= 4 Spd=480 MxCh= 0 D: Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1 P: Vendor=19d2 ProdID=0189 Rev= 0.00 S: Manufacturer=ZTE, Incorporated S: Product=ZTE LTE Technologies MSM C:* #Ifs= 5 Cfg#= 1 Atr=e0 MxPwr=500mA I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=4ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) 
Sub=ff Prot=ff Driver=option E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=4ms I:* If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option E: Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=4ms I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=option E: Ad=84(I) Atr=03(Int.) MxPS= 64 Ivl=2ms E: Ad=85(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=4ms I:* If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan E: Ad=86(I) Atr=03(Int.) MxPS= 64 Ivl=2ms E: Ad=87(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=4ms Cc: Bjørn Mork Signed-off-by: Lech Perczak Link: https://lore.kernel.org/r/20231117231918.100278-3-lech.perczak@gmail.com Signed-off-by: Paolo Abeni --- drivers/net/usb/qmi_wwan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 344af3c5c836..e2e181378f41 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1289,6 +1289,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x0168, 4)}, {QMI_FIXED_INTF(0x19d2, 0x0176, 3)}, {QMI_FIXED_INTF(0x19d2, 0x0178, 3)}, + {QMI_FIXED_INTF(0x19d2, 0x0189, 4)}, /* ZTE MF290 */ {QMI_FIXED_INTF(0x19d2, 0x0191, 4)}, /* ZTE EuFi890 */ {QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */ {QMI_FIXED_INTF(0x19d2, 0x0200, 1)}, From 4aa1d8f89b10cdc25a231dabf808d8935e0b137a Mon Sep 17 00:00:00 2001 From: Suman Ghosh Date: Tue, 21 Nov 2023 22:26:24 +0530 Subject: [PATCH 285/299] octeontx2-pf: Fix ntuple rule creation to direct packet to VF with higher Rx queue than its PF It is possible to add a ntuple rule which would like to direct packet to a VF whose number of queues are greater/less than its PF's queue numbers. For example a PF can have 2 Rx queues but a VF created on that PF can have 8 Rx queues. As of today, ntuple rule will reject rule because it is checking the requested queue number against PF's number of Rx queues. As a part of this fix if the action of a ntuple rule is to move a packet to a VF's queue then the check is removed. Also, a debug information is printed to aware user that it is user's responsibility to cross check if the requested queue number on that VF is a valid one. Fixes: f0a1913f8a6f ("octeontx2-pf: Add support for ethtool ntuple filters") Signed-off-by: Suman Ghosh Reviewed-by: Wojciech Drewek Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20231121165624.3664182-1-sumang@marvell.com Signed-off-by: Paolo Abeni --- .../marvell/octeontx2/nic/otx2_flows.c | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 4762dbea64a1..97a71e9b8563 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -1088,6 +1088,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) struct ethhdr *eth_hdr; bool new = false; int err = 0; + u64 vf_num; u32 ring; if (!flow_cfg->max_flows) { @@ -1100,7 +1101,21 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) return -ENOMEM; - if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC) + /* Number of queues on a VF can be greater or less than + * the PF's queue. Hence no need to check for the + * queue count. 
Hence no need to check queue count if PF + * is installing for its VF. Below is the expected vf_num value + * based on the ethtool commands. + * + * e.g. + * 1. ethtool -U ... action -1 ==> vf_num:255 + * 2. ethtool -U ... action ==> vf_num:0 + * 3. ethtool -U ... vf queue ==> + * vf_num:vf_idx+1 + */ + vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + if (!is_otx2_vf(pfvf->pcifunc) && !vf_num && + ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC) return -EINVAL; if (fsp->location >= otx2_get_maxflows(flow_cfg)) @@ -1182,6 +1197,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) flow_cfg->nr_flows++; } + if (flow->is_vf) + netdev_info(pfvf->netdev, + "Make sure that VF's queue number is within its queue limit\n"); return 0; } From 818ad9cc90d4a7165caaee7e32800c50d0564ec3 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 21 Nov 2023 20:08:44 +0100 Subject: [PATCH 286/299] net: veth: fix ethtool stats reporting Fix a possible misalignment between page_pool stats and tx xdp_stats reported in veth_get_ethtool_stats routine. The issue can be reproduced configuring the veth pair with the following tx/rx queues: $ip link add v0 numtxqueues 2 numrxqueues 4 type veth peer name v1 \ numtxqueues 1 numrxqueues 1 and loading a simple XDP program on v0 that just returns XDP_PASS. In this case on v0 the page_pool stats overwrites tx xdp_stats for queue 1. Fix the issue incrementing pp_idx of dev->real_num_tx_queues * VETH_TQ_STATS_LEN since we always report xdp_stats for all tx queues in ethtool. Fixes: 4fc418053ec7 ("net: veth: add page_pool stats") Signed-off-by: Lorenzo Bianconi Link: https://lore.kernel.org/r/c5b5d0485016836448453f12846c7c4ab75b094a.1700593593.git.lorenzo@kernel.org Signed-off-by: Paolo Abeni --- drivers/net/veth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 6cc352296c67..57efb3454c57 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -236,8 +236,8 @@ static void veth_get_ethtool_stats(struct net_device *dev, data[tx_idx + j] += *(u64 *)(base + offset); } } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); - pp_idx = tx_idx + VETH_TQ_STATS_LEN; } + pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN; page_pool_stats: veth_get_page_pool_stats(dev, &data[pp_idx]); From 676ec53844cbdf2f47e68a076cdff7f0ec6cbe3f Mon Sep 17 00:00:00 2001 From: Raju Rangoju Date: Wed, 22 Nov 2023 00:44:33 +0530 Subject: [PATCH 287/299] amd-xgbe: handle corner-case during sfp hotplug Force the mode change for SFI in Fixed PHY configurations. Fixed PHY configurations needs PLL to be enabled while doing mode set. When the SFP module isn't connected during boot, driver assumes AN is ON and attempts auto-negotiation. However, if the connected SFP comes up in Fixed PHY configuration the link will not come up as PLL isn't enabled while the initial mode set command is issued. So, force the mode change for SFI in Fixed PHY configuration to fix link issues. 
Fixes: e57f7a3feaef ("amd-xgbe: Prepare for working with more than one type of phy") Acked-by: Shyam Sundar S K Signed-off-by: Raju Rangoju Reviewed-by: Wojciech Drewek Signed-off-by: Paolo Abeni --- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 32d2c6fac652..4a2dc705b528 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1193,7 +1193,19 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata) if (pdata->phy.duplex != DUPLEX_FULL) return -EINVAL; - xgbe_set_mode(pdata, mode); + /* Force the mode change for SFI in Fixed PHY config. + * Fixed PHY configs needs PLL to be enabled while doing mode set. + * When the SFP module isn't connected during boot, driver assumes + * AN is ON and attempts autonegotiation. However, if the connected + * SFP comes up in Fixed PHY config, the link will not come up as + * PLL isn't enabled while the initial mode set command is issued. + * So, force the mode change for SFI in Fixed PHY configuration to + * fix link issues. + */ + if (mode == XGBE_MODE_SFI) + xgbe_change_mode(pdata, mode); + else + xgbe_set_mode(pdata, mode); return 0; } From 7121205d5330c6a3cb3379348886d47c77b78d06 Mon Sep 17 00:00:00 2001 From: Raju Rangoju Date: Wed, 22 Nov 2023 00:44:34 +0530 Subject: [PATCH 288/299] amd-xgbe: handle the corner-case during tx completion The existing implementation uses software logic to accumulate tx completions until the specified time (1ms) is met and then poll them. However, there exists a tiny gap which leads to a race between resetting and checking the tx_activate flag. Due to this the tx completions are not reported to upper layer and tx queue timeout kicks-in restarting the device. To address this, introduce a tx cleanup mechanism as part of the periodic maintenance process. Fixes: c5aa9e3b8156 ("amd-xgbe: Initial AMD 10GbE platform driver") Acked-by: Shyam Sundar S K Signed-off-by: Raju Rangoju Reviewed-by: Wojciech Drewek Signed-off-by: Paolo Abeni --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 614c0278419b..6b73648b3779 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -682,10 +682,24 @@ static void xgbe_service(struct work_struct *work) static void xgbe_service_timer(struct timer_list *t) { struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer); + struct xgbe_channel *channel; + unsigned int i; queue_work(pdata->dev_workqueue, &pdata->service_work); mod_timer(&pdata->service_timer, jiffies + HZ); + + if (!pdata->tx_usecs) + return; + + for (i = 0; i < pdata->channel_count; i++) { + channel = pdata->channel[i]; + if (!channel->tx_ring || channel->tx_timer_active) + break; + channel->tx_timer_active = 1; + mod_timer(&channel->tx_timer, + jiffies + usecs_to_jiffies(pdata->tx_usecs)); + } } static void xgbe_init_timers(struct xgbe_prv_data *pdata) From 7a2323ac24a50311f64a3a9b54ed5bef5821ecae Mon Sep 17 00:00:00 2001 From: Raju Rangoju Date: Wed, 22 Nov 2023 00:44:35 +0530 Subject: [PATCH 289/299] amd-xgbe: propagate the correct speed and duplex status xgbe_get_link_ksettings() does not propagate correct speed and duplex information to ethtool during cable unplug. 
Due to which ethtool reports incorrect values for speed and duplex. Address this by propagating correct information. Fixes: 7c12aa08779c ("amd-xgbe: Move the PHY support into amd-xgbe") Acked-by: Shyam Sundar S K Signed-off-by: Raju Rangoju Reviewed-by: Wojciech Drewek Signed-off-by: Paolo Abeni --- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 6e83ff59172a..32fab5e77246 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev, cmd->base.phy_address = pdata->phy.address; - cmd->base.autoneg = pdata->phy.autoneg; - cmd->base.speed = pdata->phy.speed; - cmd->base.duplex = pdata->phy.duplex; + if (netif_carrier_ok(netdev)) { + cmd->base.speed = pdata->phy.speed; + cmd->base.duplex = pdata->phy.duplex; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.autoneg = pdata->phy.autoneg; cmd->base.port = PORT_NONE; XGBE_LM_COPY(cmd, supported, lks, supported); From 0ffb08b1a45bd6b7694e01da0e1d9e3e788418fb Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 21 Nov 2023 13:12:55 -0800 Subject: [PATCH 290/299] ice: remove ptp_tx ring parameter flag Before performing a Tx timestamp in ice_stamp(), the driver checks a ptp_tx ring variable to see if timestamping is enabled on that ring. This value is set for all rings whenever userspace configures Tx timestamping. Ostensibly this was done to avoid wasting cycles checking other fields when timestamping has not been enabled. However, for Tx timestamps we already get an individual per-SKB flag indicating whether userspace wants to request a timestamp on that packet. We do not gain much by also having a separate flag to check for whether timestamping was enabled. In fact, the driver currently fails to restore the field after a PF reset. Because of this, if a PF reset occurs, timestamps will be disabled. Since this flag doesn't add value in the hotpath, remove it and always provide a timestamp if the SKB flag has been set. A following change will fix the reset path to properly restore user timestamping configuration completely. This went unnoticed for some time because one of the most common applications using Tx timestamps, ptp4l, will reconfigure the socket as part of its fault recovery logic. 
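For illustration, a minimal sketch of the gating that remains after the flag removal, with demo_* stand-ins for SKBTX_HW_TSTAMP and ICE_TX_FLAGS_TSO (the actual ice_tstamp() change is in the diff below):

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_SKBTX_HW_TSTAMP  (1u << 0)  /* stand-in for SKBTX_HW_TSTAMP */
    #define DEMO_TX_FLAGS_TSO     (1u << 1)  /* stand-in for ICE_TX_FLAGS_TSO */

    struct demo_skb { unsigned int tx_flags; };

    /* Only the per-packet request and the TSO restriction gate the
     * timestamp now; there is no per-ring enable flag left that a PF
     * reset could forget to restore. */
    static bool demo_should_timestamp(const struct demo_skb *skb,
                                      unsigned int first_flags)
    {
            if (!(skb->tx_flags & DEMO_SKBTX_HW_TSTAMP))
                    return false;
            if (first_flags & DEMO_TX_FLAGS_TSO)
                    return false;
            return true;
    }

    int main(void)
    {
            struct demo_skb skb = { .tx_flags = DEMO_SKBTX_HW_TSTAMP };

            printf("plain packet: %d\n", demo_should_timestamp(&skb, 0));
            printf("TSO packet:   %d\n",
                   demo_should_timestamp(&skb, DEMO_TX_FLAGS_TSO));
            return 0;
    }
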
Fixes: ea9b847cda64 ("ice: enable transmit timestamps for E810 devices") Signed-off-by: Jacob Keller Reviewed-by: Jesse Brandeburg Reviewed-by: Simon Horman Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Tony Nguyen Signed-off-by: Paolo Abeni --- drivers/net/ethernet/intel/ice/ice_ptp.c | 14 -------------- drivers/net/ethernet/intel/ice/ice_txrx.c | 3 --- drivers/net/ethernet/intel/ice/ice_txrx.h | 1 - 3 files changed, 18 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 1eddcbe89b0c..affd90622a68 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -280,20 +280,6 @@ static void ice_ptp_configure_tx_tstamp(struct ice_pf *pf, bool on) */ static void ice_set_tx_tstamp(struct ice_pf *pf, bool on) { - struct ice_vsi *vsi; - u16 i; - - vsi = ice_get_main_vsi(pf); - if (!vsi) - return; - - /* Set the timestamp enable flag for all the Tx rings */ - ice_for_each_txq(vsi, i) { - if (!vsi->tx_rings[i]) - continue; - vsi->tx_rings[i]->ptp_tx = on; - } - if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF) ice_ptp_configure_tx_tstamp(pf, on); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 52d0a126eb61..9e97ea863068 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -2306,9 +2306,6 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb, if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) return; - if (!tx_ring->ptp_tx) - return; - /* Tx timestamps cannot be sampled when doing TSO */ if (first->tx_flags & ICE_TX_FLAGS_TSO) return; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 166413fc33f4..daf7b9dbb143 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -380,7 +380,6 @@ struct ice_tx_ring { #define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2) u8 flags; u8 dcb_tc; /* Traffic class of ring */ - u8 ptp_tx; } ____cacheline_internodealigned_in_smp; static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring) From 7d606a1e2d0575b6c3a2600f43f90d1e409f9661 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 21 Nov 2023 13:12:56 -0800 Subject: [PATCH 291/299] ice: unify logic for programming PFINT_TSYN_MSK Commit d938a8cca88a ("ice: Auxbus devices & driver for E822 TS") modified how Tx timestamps are handled for E822 devices. On these devices, only the clock owner handles reading the Tx timestamp data from firmware. To do this, the PFINT_TSYN_MSK register is modified from the default value to one which enables reacting to a Tx timestamp on all PHY ports. The driver currently programs PFINT_TSYN_MSK in different places depending on whether the port is the clock owner or not. For the clock owner, the PFINT_TSYN_MSK value is programmed during ice_ptp_init_owner just before calling ice_ptp_tx_ena_intr to program the PHY ports. For the non-clock owner ports, the PFINT_TSYN_MSK is programmed during ice_ptp_init_port. If a large enough device reset occurs, the PFINT_TSYN_MSK register will be reset to the default value in which only the PHY associated directly with the PF will cause the Tx timestamp interrupt to trigger. The driver lacks logic to reprogram the PFINT_TSYN_MSK register after a device reset. For the E822 device, this results in the PF no longer responding to interrupts for other ports. 
This results in failure to deliver Tx timestamps to user space applications. Rename ice_ptp_configure_tx_tstamp to ice_ptp_cfg_tx_interrupt, and unify the logic for programming PFINT_TSYN_MSK and PFINT_OICR_ENA into one place. This function will program both registers according to the combination of user configuration and device requirements. This ensures that PFINT_TSYN_MSK is always restored when we configure the Tx timestamp interrupt. Fixes: d938a8cca88a ("ice: Auxbus devices & driver for E822 TS") Signed-off-by: Jacob Keller Reviewed-by: Jesse Brandeburg Reviewed-by: Simon Horman Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Tony Nguyen Signed-off-by: Paolo Abeni --- drivers/net/ethernet/intel/ice/ice_ptp.c | 60 ++++++++++++++---------- 1 file changed, 34 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index affd90622a68..624d05b4bbd9 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -256,21 +256,42 @@ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin, } /** - * ice_ptp_configure_tx_tstamp - Enable or disable Tx timestamp interrupt - * @pf: The PF pointer to search in - * @on: bool value for whether timestamp interrupt is enabled or disabled + * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device + * @pf: Board private structure + * + * Program the device to respond appropriately to the Tx timestamp interrupt + * cause. */ -static void ice_ptp_configure_tx_tstamp(struct ice_pf *pf, bool on) +static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf) { + struct ice_hw *hw = &pf->hw; + bool enable; u32 val; + switch (pf->ptp.tx_interrupt_mode) { + case ICE_PTP_TX_INTERRUPT_ALL: + /* React to interrupts across all quads. */ + wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f); + enable = true; + break; + case ICE_PTP_TX_INTERRUPT_NONE: + /* Do not react to interrupts on any quad. */ + wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0); + enable = false; + break; + case ICE_PTP_TX_INTERRUPT_SELF: + default: + enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON; + break; + } + /* Configure the Tx timestamp interrupt */ - val = rd32(&pf->hw, PFINT_OICR_ENA); - if (on) + val = rd32(hw, PFINT_OICR_ENA); + if (enable) val |= PFINT_OICR_TSYN_TX_M; else val &= ~PFINT_OICR_TSYN_TX_M; - wr32(&pf->hw, PFINT_OICR_ENA, val); + wr32(hw, PFINT_OICR_ENA, val); } /** @@ -280,10 +301,9 @@ static void ice_ptp_configure_tx_tstamp(struct ice_pf *pf, bool on) */ static void ice_set_tx_tstamp(struct ice_pf *pf, bool on) { - if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF) - ice_ptp_configure_tx_tstamp(pf, on); - pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + + ice_ptp_cfg_tx_interrupt(pf); } /** @@ -2789,15 +2809,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf) /* Release the global hardware lock */ ice_ptp_unlock(hw); - if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL) { - /* The clock owner for this device type handles the timestamp - * interrupt for all ports. 
- */ - ice_ptp_configure_tx_tstamp(pf, true); - - /* React on all quads interrupts for E82x */ - wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f); - + if (!ice_is_e810(hw)) { /* Enable quad interrupts */ err = ice_ptp_tx_ena_intr(pf, true, itr); if (err) @@ -2867,13 +2879,6 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) case ICE_PHY_E810: return ice_ptp_init_tx_e810(pf, &ptp_port->tx); case ICE_PHY_E822: - /* Non-owner PFs don't react to any interrupts on E82x, - * neither on own quad nor on others - */ - if (!ice_ptp_pf_handles_tx_interrupt(pf)) { - ice_ptp_configure_tx_tstamp(pf, false); - wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0); - } kthread_init_delayed_work(&ptp_port->ov_work, ice_ptp_wait_for_offsets); @@ -3018,6 +3023,9 @@ void ice_ptp_init(struct ice_pf *pf) /* Start the PHY timestamping block */ ice_ptp_reset_phy_timestamping(pf); + /* Configure initial Tx interrupt settings */ + ice_ptp_cfg_tx_interrupt(pf); + set_bit(ICE_FLAG_PTP, pf->flags); err = ice_ptp_init_work(pf, ptp); if (err) From 7758017911a4f2578d54c318e8fe77bcb5899054 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 21 Nov 2023 13:12:57 -0800 Subject: [PATCH 292/299] ice: restore timestamp configuration after device reset The driver calls ice_ptp_cfg_timestamp() during ice_ptp_prepare_for_reset() to disable timestamping while the device is resetting. This operation destroys the user requested configuration. While the driver does call ice_ptp_cfg_timestamp in ice_rebuild() to restore some hardware settings after a reset, it unconditionally passes true or false, resulting in failure to restore previous user space configuration. This results in a device reset forcibly disabling timestamp configuration regardless of current user settings. This was not detected previously due to a quirk of the LinuxPTP ptp4l application. If ptp4l detects a missing timestamp, it enters a fault state and performs recovery logic which includes executing SIOCSHWTSTAMP again, restoring the now accidentally cleared configuration. Not every application does this, and for these applications, timestamps will mysteriously stop after a PF reset, without being restored until an application restart. Fix this by replacing ice_ptp_cfg_timestamp() with two new functions: 1) ice_ptp_disable_timestamp_mode() which unconditionally disables the timestamping logic in ice_ptp_prepare_for_reset() and ice_ptp_release() 2) ice_ptp_restore_timestamp_mode() which calls ice_ptp_restore_tx_interrupt() to restore Tx timestamping configuration, calls ice_set_rx_tstamp() to restore Rx timestamping configuration, and issues an immediate TSYN_TX interrupt to ensure that timestamps which may have occurred during the device reset get processed. Modify the ice_ptp_set_timestamp_mode to directly save the user configuration and then call ice_ptp_restore_timestamp_mode. This way, reset no longer destroys the saved user configuration. This obsoletes the ice_set_tx_tstamp() function which can now be safely removed. With this change, all devices should now restore Tx and Rx timestamping functionality correctly after a PF reset without application intervention. 
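As a rough sketch of the resulting design, assuming simplified demo_* state in place of the driver's ice_ptp structures (the real functions are in the diff below): the requested mode is stored in software first, and the same restore routine is reused from both the SIOCSHWTSTAMP path and the post-reset rebuild, so a reset can no longer erase what user space asked for.

    #include <stdbool.h>
    #include <stdio.h>

    enum demo_tx_mode { DEMO_TX_OFF, DEMO_TX_ON };

    struct demo_ptp {
            enum demo_tx_mode saved_tx;  /* what user space last requested */
            bool hw_tx_enabled;          /* what the device is programmed to */
    };

    /* Program the "hardware" purely from the saved configuration. */
    static void demo_restore_timestamp_mode(struct demo_ptp *ptp)
    {
            ptp->hw_tx_enabled = (ptp->saved_tx == DEMO_TX_ON);
    }

    /* SIOCSHWTSTAMP path: record the request, then apply it. */
    static void demo_set_timestamp_mode(struct demo_ptp *ptp, enum demo_tx_mode tx)
    {
            ptp->saved_tx = tx;
            demo_restore_timestamp_mode(ptp);
    }

    /* Reset path: device state is lost, the saved configuration is not. */
    static void demo_rebuild_after_reset(struct demo_ptp *ptp)
    {
            ptp->hw_tx_enabled = false;          /* hardware defaults */
            demo_restore_timestamp_mode(ptp);    /* replay user settings */
    }

    int main(void)
    {
            struct demo_ptp ptp = { DEMO_TX_OFF, false };

            demo_set_timestamp_mode(&ptp, DEMO_TX_ON);
            demo_rebuild_after_reset(&ptp);
            printf("Tx timestamping after reset: %s\n",
                   ptp.hw_tx_enabled ? "on" : "off");
            return 0;
    }
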
Fixes: 77a781155a65 ("ice: enable receive hardware timestamping") Fixes: ea9b847cda64 ("ice: enable transmit timestamps for E810 devices") Signed-off-by: Jacob Keller Reviewed-by: Jesse Brandeburg Reviewed-by: Simon Horman Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Tony Nguyen Signed-off-by: Paolo Abeni --- drivers/net/ethernet/intel/ice/ice_main.c | 12 +--- drivers/net/ethernet/intel/ice/ice_ptp.c | 76 ++++++++++++++--------- drivers/net/ethernet/intel/ice/ice_ptp.h | 5 +- 3 files changed, 52 insertions(+), 41 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 6607fa6fe556..fb9c93f37e84 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -7401,15 +7401,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } - /* configure PTP timestamping after VSI rebuild */ - if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) { - if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF) - ice_ptp_cfg_timestamp(pf, false); - else if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL) - /* for E82x PHC owner always need to have interrupts */ - ice_ptp_cfg_timestamp(pf, true); - } - err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); if (err) { dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); @@ -7461,6 +7452,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_plug_aux_dev(pf); if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) ice_lag_rebuild(pf); + + /* Restore timestamp mode settings after VSI rebuild */ + ice_ptp_restore_timestamp_mode(pf); return; err_vsi_rebuild: diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 624d05b4bbd9..71f405f8a6fe 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -294,18 +294,6 @@ static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf) wr32(hw, PFINT_OICR_ENA, val); } -/** - * ice_set_tx_tstamp - Enable or disable Tx timestamping - * @pf: The PF pointer to search in - * @on: bool value for whether timestamps are enabled or disabled - */ -static void ice_set_tx_tstamp(struct ice_pf *pf, bool on) -{ - pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - - ice_ptp_cfg_tx_interrupt(pf); -} - /** * ice_set_rx_tstamp - Enable or disable Rx timestamping * @pf: The PF pointer to search in @@ -317,7 +305,7 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) u16 i; vsi = ice_get_main_vsi(pf); - if (!vsi) + if (!vsi || !vsi->rx_rings) return; /* Set the timestamp flag for all the Rx rings */ @@ -326,23 +314,50 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) continue; vsi->rx_rings[i]->ptp_rx = on; } - - pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL : - HWTSTAMP_FILTER_NONE; } /** - * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit + * ice_ptp_disable_timestamp_mode - Disable current timestamp mode * @pf: Board private structure - * @ena: bool value to enable or disable time stamp * - * This function will configure timestamping during PTP initialization - * and deinitialization + * Called during preparation for reset to temporarily disable timestamping on + * the device. Called during remove to disable timestamping while cleaning up + * driver resources. 
*/ -void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) +static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf) { - ice_set_tx_tstamp(pf, ena); - ice_set_rx_tstamp(pf, ena); + struct ice_hw *hw = &pf->hw; + u32 val; + + val = rd32(hw, PFINT_OICR_ENA); + val &= ~PFINT_OICR_TSYN_TX_M; + wr32(hw, PFINT_OICR_ENA, val); + + ice_set_rx_tstamp(pf, false); +} + +/** + * ice_ptp_restore_timestamp_mode - Restore timestamp configuration + * @pf: Board private structure + * + * Called at the end of rebuild to restore timestamp configuration after + * a device reset. + */ +void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + bool enable_rx; + + ice_ptp_cfg_tx_interrupt(pf); + + enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL; + ice_set_rx_tstamp(pf, enable_rx); + + /* Trigger an immediate software interrupt to ensure that timestamps + * which occurred during reset are handled now. + */ + wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); + ice_flush(hw); } /** @@ -2043,10 +2058,10 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) { switch (config->tx_type) { case HWTSTAMP_TX_OFF: - ice_set_tx_tstamp(pf, false); + pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF; break; case HWTSTAMP_TX_ON: - ice_set_tx_tstamp(pf, true); + pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON; break; default: return -ERANGE; @@ -2054,7 +2069,7 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: - ice_set_rx_tstamp(pf, false); + pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: @@ -2070,12 +2085,15 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_ALL: - ice_set_rx_tstamp(pf, true); + pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } + /* Immediately update the device timestamping mode */ + ice_ptp_restore_timestamp_mode(pf); + return 0; } @@ -2743,7 +2761,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf) clear_bit(ICE_FLAG_PTP, pf->flags); /* Disable timestamping for both Tx and Rx */ - ice_ptp_cfg_timestamp(pf, false); + ice_ptp_disable_timestamp_mode(pf); kthread_cancel_delayed_work_sync(&ptp->work); @@ -3061,7 +3079,7 @@ void ice_ptp_release(struct ice_pf *pf) return; /* Disable timestamping for both Tx and Rx */ - ice_ptp_cfg_timestamp(pf, false); + ice_ptp_disable_timestamp_mode(pf); ice_ptp_remove_auxbus_device(pf); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 8f6f94392756..06a330867fc9 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -292,7 +292,7 @@ int ice_ptp_clock_index(struct ice_pf *pf); struct ice_pf; int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr); int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr); -void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena); +void ice_ptp_restore_timestamp_mode(struct ice_pf *pf); void ice_ptp_extts_event(struct ice_pf *pf); s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); @@ -317,8 +317,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) return -EOPNOTSUPP; } -static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { } - +static inline void 
ice_ptp_restore_timestamp_mode(struct ice_pf *pf) { } static inline void ice_ptp_extts_event(struct ice_pf *pf) { } static inline s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) From 4e20655e503e3a478cd1682bf25e3202dd823da8 Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Tue, 21 Nov 2023 13:13:36 -0800 Subject: [PATCH 293/299] i40e: Fix adding unsupported cloud filters If a VF tries to add unsupported cloud filter through virtchnl then i40e_add_del_cloud_filter(_big_buf) returns -ENOTSUPP but this error code is stored in 'ret' instead of 'aq_ret' that is used as error code sent back to VF. In this scenario where one of the mentioned functions fails the value of 'aq_ret' is zero so the VF will incorrectly receive a 'success'. Use 'aq_ret' to store return value and remove 'ret' local variable. Additionally fix the issue when filter allocation fails, in this case no notification is sent back to the VF. Fixes: e284fc280473 ("i40e: Add and delete cloud filter") Reviewed-by: Simon Horman Signed-off-by: Ivan Vecera Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen Link: https://lore.kernel.org/r/20231121211338.3348677-1-anthony.l.nguyen@intel.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 08d7edccfb8d..3f99eb198245 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -3844,7 +3844,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; int aq_ret = 0; - int i, ret; + int i; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = -EINVAL; @@ -3868,8 +3868,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) } cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); - if (!cfilter) - return -ENOMEM; + if (!cfilter) { + aq_ret = -ENOMEM; + goto err_out; + } /* parse destination mac address */ for (i = 0; i < ETH_ALEN; i++) @@ -3917,13 +3919,13 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) /* Adding cloud filter programmed as TC filter */ if (tcf.dst_port) - ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); + aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); else - ret = i40e_add_del_cloud_filter(vsi, cfilter, true); - if (ret) { + aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true); + if (aq_ret) { dev_err(&pf->pdev->dev, "VF %d: Failed to add cloud filter, err %pe aq_err %s\n", - vf->vf_id, ERR_PTR(ret), + vf->vf_id, ERR_PTR(aq_ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto err_free; } From f0863888f6cfef33e3117dccfe94fa78edf76be4 Mon Sep 17 00:00:00 2001 From: Arseniy Krasnov Date: Wed, 22 Nov 2023 00:16:42 +0300 Subject: [PATCH 294/299] vsock/test: fix SEQPACKET message bounds test Tune message length calculation to make this test work on machines where 'getpagesize()' returns >32KB. Now maximum message length is not hardcoded (on machines above it was smaller than 'getpagesize()' return value, thus we get negative value and test fails), but calculated at runtime and always bigger than 'getpagesize()' result. Reproduced on aarch64 with 64KB page size. 
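For illustration, a small standalone C program showing the runtime sizing the test now uses; DEMO_MAX_MSG_PAGES mirrors the new MAX_MSG_PAGES constant, while the printed values are only an example:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    #define DEMO_SOCK_BUF_SIZE  (2 * 1024 * 1024)
    #define DEMO_MAX_MSG_PAGES  4

    int main(void)
    {
            size_t page_size = (size_t)getpagesize();
            size_t max_msg_size = DEMO_MAX_MSG_PAGES * page_size;
            int msg_count = DEMO_SOCK_BUF_SIZE / max_msg_size;

            /* "Big" buffers: at least one page, below max_msg_size. With
             * the old hardcoded 32 KiB cap and a 64 KiB page, the
             * (cap - page_size) operand went negative and the computed
             * size was bogus; deriving the cap from the page size keeps
             * it positive for any page size. */
            size_t big = page_size + (rand() % (max_msg_size - page_size));
            size_t small = 1 + (rand() % page_size);

            printf("page=%zu max_msg=%zu msgs=%d big=%zu small=%zu\n",
                   page_size, max_msg_size, msg_count, big, small);
            return 0;
    }
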
Fixes: 5c338112e48a ("test/vsock: rework message bounds test") Signed-off-by: Arseniy Krasnov Reported-by: Bogdan Marcynkov Reviewed-by: Stefano Garzarella Link: https://lore.kernel.org/r/20231121211642.163474-1-avkrasnov@salutedevices.com Signed-off-by: Jakub Kicinski --- tools/testing/vsock/vsock_test.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c index 5b0e93f9996c..01fa816868bc 100644 --- a/tools/testing/vsock/vsock_test.c +++ b/tools/testing/vsock/vsock_test.c @@ -353,11 +353,12 @@ static void test_stream_msg_peek_server(const struct test_opts *opts) } #define SOCK_BUF_SIZE (2 * 1024 * 1024) -#define MAX_MSG_SIZE (32 * 1024) +#define MAX_MSG_PAGES 4 static void test_seqpacket_msg_bounds_client(const struct test_opts *opts) { unsigned long curr_hash; + size_t max_msg_size; int page_size; int msg_count; int fd; @@ -373,7 +374,8 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts) curr_hash = 0; page_size = getpagesize(); - msg_count = SOCK_BUF_SIZE / MAX_MSG_SIZE; + max_msg_size = MAX_MSG_PAGES * page_size; + msg_count = SOCK_BUF_SIZE / max_msg_size; for (int i = 0; i < msg_count; i++) { size_t buf_size; @@ -383,7 +385,7 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts) /* Use "small" buffers and "big" buffers. */ if (i & 1) buf_size = page_size + - (rand() % (MAX_MSG_SIZE - page_size)); + (rand() % (max_msg_size - page_size)); else buf_size = 1 + (rand() % page_size); @@ -429,7 +431,6 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts) unsigned long remote_hash; unsigned long curr_hash; int fd; - char buf[MAX_MSG_SIZE]; struct msghdr msg = {0}; struct iovec iov = {0}; @@ -457,8 +458,13 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts) control_writeln("SRVREADY"); /* Wait, until peer sends whole data. */ control_expectln("SENDDONE"); - iov.iov_base = buf; - iov.iov_len = sizeof(buf); + iov.iov_len = MAX_MSG_PAGES * getpagesize(); + iov.iov_base = malloc(iov.iov_len); + if (!iov.iov_base) { + perror("malloc"); + exit(EXIT_FAILURE); + } + msg.msg_iov = &iov; msg.msg_iovlen = 1; @@ -483,6 +489,7 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts) curr_hash += hash_djb2(msg.msg_iov[0].iov_base, recv_size); } + free(iov.iov_base); close(fd); remote_hash = control_readulong(); From fd0413bbf8b11f56e8aa842783b0deda0dfe2926 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Tue, 21 Nov 2023 16:42:17 -0800 Subject: [PATCH 295/299] net: axienet: Fix check for partial TX checksum Due to a typo, the code checked the RX checksum feature in the TX path. 
Fixes: 8a3b7a252dca ("drivers/net/ethernet/xilinx: added Xilinx AXI Ethernet driver") Signed-off-by: Samuel Holland Reviewed-by: Andrew Lunn Reviewed-by: Radhey Shyam Pandey Link: https://lore.kernel.org/r/20231122004219.3504219-1-samuel.holland@sifive.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 82d0d44b2b02..bf6e33990490 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -822,7 +822,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { /* Tx Full Checksum Offload Enabled */ cur_p->app0 |= 2; - } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { + } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) { csum_start_off = skb_transport_offset(skb); csum_index_off = csum_start_off + skb->csum_offset; /* Tx Partial Checksum Offload Enabled */ From 53f2cb491b500897a619ff6abd72f565933760f0 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Wed, 22 Nov 2023 22:44:47 +0100 Subject: [PATCH 296/299] tls: fix NULL deref on tls_sw_splice_eof() with empty record syzkaller discovered that if tls_sw_splice_eof() is executed as part of sendfile() when the plaintext/ciphertext sk_msg are empty, the send path gets confused because the empty ciphertext buffer does not have enough space for the encryption overhead. This causes tls_push_record() to go on the `split = true` path (which is only supposed to be used when interacting with an attached BPF program), and then get further confused and hit the tls_merge_open_record() path, which then assumes that there must be at least one populated buffer element, leading to a NULL deref. It is possible to have empty plaintext/ciphertext buffers if we previously bailed from tls_sw_sendmsg_locked() via the tls_trim_both_msgs() path. tls_sw_push_pending_record() already handles this case correctly; let's do the same check in tls_sw_splice_eof(). Fixes: df720d288dbb ("tls/sw: Use splice_eof() to flush") Cc: stable@vger.kernel.org Reported-by: syzbot+40d43509a099ea756317@syzkaller.appspotmail.com Signed-off-by: Jann Horn Link: https://lore.kernel.org/r/20231122214447.675768-1-jannh@google.com Signed-off-by: Jakub Kicinski --- net/tls/tls_sw.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index a78e8e722409..316f76187962 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1232,11 +1232,14 @@ void tls_sw_splice_eof(struct socket *sock) lock_sock(sk); retry: + /* same checks as in tls_sw_push_pending_record() */ rec = ctx->open_rec; if (!rec) goto unlock; msg_pl = &rec->msg_plaintext; + if (msg_pl->sg.size == 0) + goto unlock; /* Check the BPF advisor and perform transmission. */ ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA, From 37f0205538baf70beb57cdcb6c7d14aa13257926 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 22 Nov 2023 17:17:08 -0600 Subject: [PATCH 297/299] net: ipa: fix one GSI register field width The width of the R_LENGTH field of the EV_CH_E_CNTXT_1 GSI register is 24 bits (not 20 bits) starting with IPA v5.0. Fix this. 
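For illustration, a userspace re-implementation of the GENMASK() arithmetic to show why the field width matters; the 3 MiB ring length below is an arbitrary example value above the 20-bit limit, not something taken from the driver:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's GENMASK() on 32-bit values. */
    #define DEMO_GENMASK(h, l) \
            ((~0u - (1u << (l)) + 1) & (~0u >> (31 - (h))))

    int main(void)
    {
            uint32_t old_mask = DEMO_GENMASK(19, 0);   /* 20-bit R_LENGTH */
            uint32_t new_mask = DEMO_GENMASK(23, 0);   /* 24-bit R_LENGTH */
            uint32_t ring_len = 3 * 1024 * 1024;       /* example > 2^20 - 1 */

            /* With the narrow mask the upper bits of the length are
             * silently dropped when the value is encoded into the field. */
            printf("old mask: %#x -> %#x\n", old_mask, ring_len & old_mask);
            printf("new mask: %#x -> %#x\n", new_mask, ring_len & new_mask);
            return 0;
    }
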
Fixes: faf0678ec8a0 ("net: ipa: add IPA v5.0 GSI register definitions") Signed-off-by: Alex Elder Link: https://lore.kernel.org/r/20231122231708.896632-1-elder@linaro.org Signed-off-by: Jakub Kicinski --- drivers/net/ipa/reg/gsi_reg-v5.0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ipa/reg/gsi_reg-v5.0.c b/drivers/net/ipa/reg/gsi_reg-v5.0.c index d7b81a36d673..145eb0bd096d 100644 --- a/drivers/net/ipa/reg/gsi_reg-v5.0.c +++ b/drivers/net/ipa/reg/gsi_reg-v5.0.c @@ -78,7 +78,7 @@ REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0, 0x0001c000 + 0x12000 * GSI_EE_AP, 0x80); static const u32 reg_ev_ch_e_cntxt_1_fmask[] = { - [R_LENGTH] = GENMASK(19, 0), + [R_LENGTH] = GENMASK(23, 0), }; REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1, From 2be35a619482c1f4e5bc7a2d84049b8d7d171882 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 22 Nov 2023 19:06:24 -0800 Subject: [PATCH 298/299] tools: ynl: fix header path for nfsd The makefile dependency is trying to include the wrong header: : fatal error: ../../../../include/uapi//linux/nfsd.h: No such file or directory The guard also looks wrong. Fixes: f14122b2c2ac ("tools: ynl: Add source files for nfsd netlink protocol") Reviewed-by: Chuck Lever Link: https://lore.kernel.org/r/20231123030624.1611925-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- tools/net/ynl/Makefile.deps | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/net/ynl/Makefile.deps b/tools/net/ynl/Makefile.deps index 64d139400db1..3110f84dd029 100644 --- a/tools/net/ynl/Makefile.deps +++ b/tools/net/ynl/Makefile.deps @@ -18,4 +18,4 @@ CFLAGS_devlink:=$(call get_hdr_inc,_LINUX_DEVLINK_H_,devlink.h) CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h) CFLAGS_handshake:=$(call get_hdr_inc,_LINUX_HANDSHAKE_H,handshake.h) CFLAGS_netdev:=$(call get_hdr_inc,_LINUX_NETDEV_H,netdev.h) -CFLAGS_nfsd:=$(call get_hdr_inc,_LINUX_NFSD_H,nfsd.h) +CFLAGS_nfsd:=$(call get_hdr_inc,_LINUX_NFSD_NETLINK_H,nfsd_netlink.h) From 39f04b1406b23fcc129a67e70d6205d5a7322f38 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 22 Nov 2023 19:05:58 -0800 Subject: [PATCH 299/299] tools: ynl: fix duplicate op name in devlink MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't support CRUD-inspired message types in YNL too well. One aspect that currently trips us up is the fact that single message ID can be used in multiple commands (as the response). This leads to duplicate entries in the id-to-string tables: devlink-user.c:19:34: warning: initialized field overwritten [-Woverride-init] 19 | [DEVLINK_CMD_PORT_NEW] = "port-new", | ^~~~~~~~~~ devlink-user.c:19:34: note: (near initialization for ‘devlink_op_strmap[7]’) Fixes tag points at where the code was generated, the "real" problem is that the code generator does not support CRUD. 
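For illustration, a self-contained C snippet reproducing the override that the warning above points at; the DEMO_CMD_* values are made up for this example, but they mirror how two devlink commands can share a single reply value in the generated table:

    #include <stdio.h>

    #define DEMO_CMD_PORT_GET 7
    #define DEMO_CMD_PORT_NEW 7   /* legacy CRUD op: reply reuses the same value */

    static const char * const demo_op_strmap[] = {
            [3] = "get",
            [DEMO_CMD_PORT_GET] = "port-get",
            [DEMO_CMD_PORT_NEW] = "port-new",  /* overrides "port-get" */
    };

    int main(void)
    {
            /* gcc -Wextra flags the duplicate index with -Woverride-init;
             * at runtime only the last initializer survives. */
            printf("op 7 -> %s\n", demo_op_strmap[7]);
            return 0;
    }

Emitting only one entry per reply value, as the generator change below does, avoids both the warning and the ambiguity.
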
Fixes: f2f9dd164db0 ("netlink: specs: devlink: add the remaining command to generate complete split_ops") Link: https://lore.kernel.org/r/20231123030558.1611831-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- tools/net/ynl/generated/devlink-user.c | 2 +- tools/net/ynl/ynl-gen-c.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/net/ynl/generated/devlink-user.c b/tools/net/ynl/generated/devlink-user.c index bc5065bd99b2..c12ca87ca2bb 100644 --- a/tools/net/ynl/generated/devlink-user.c +++ b/tools/net/ynl/generated/devlink-user.c @@ -15,7 +15,7 @@ /* Enums */ static const char * const devlink_op_strmap[] = { [3] = "get", - [7] = "port-get", + // skip "port-get", duplicate reply value [DEVLINK_CMD_PORT_NEW] = "port-new", [13] = "sb-get", [17] = "sb-pool-get", diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py index c4003a83cd5d..3bd6b928c14f 100755 --- a/tools/net/ynl/ynl-gen-c.py +++ b/tools/net/ynl/ynl-gen-c.py @@ -1505,6 +1505,12 @@ def put_op_name(family, cw): cw.block_start(line=f"static const char * const {map_name}[] =") for op_name, op in family.msgs.items(): if op.rsp_value: + # Make sure we don't add duplicated entries, if multiple commands + # produce the same response in legacy families. + if family.rsp_by_value[op.rsp_value] != op: + cw.p(f'// skip "{op_name}", duplicate reply value') + continue + if op.req_value == op.rsp_value: cw.p(f'[{op.enum_name}] = "{op_name}",') else: