Merge: update cpufreq to match upstream v6.15
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/6598
JIRA: https://issues.redhat.com/browse/RHEL-83803
Resolves: RHEL-83803

Adds bugfixes and regular updates to match upstream v6.15.

Signed-off-by: Mark Langsdorf <mlangsdo@redhat.com>
Approved-by: Eric Chanudet <echanude@redhat.com>
Approved-by: David Arcari <darcari@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: Augusto Caringi <acaringi@redhat.com>
This commit is contained in: commit 1b730d5bcb
@@ -618,7 +618,7 @@
 #define HID1_ABE	(1<<10)		/* 7450 Address Broadcast Enable */
 #define HID1_PS		(1<<16)		/* 750FX PLL selection */
 #endif
-#define SPRN_HID2	0x3F8		/* Hardware Implementation Register 2 */
+#define SPRN_HID2_750FX	0x3F8		/* IBM 750FX HID2 Register */
 #define SPRN_HID2_GEKKO	0x398		/* Gekko HID2 Register */
 #define SPRN_IABR	0x3F2		/* Instruction Address Breakpoint Register */
 #define SPRN_IABR2	0x3FA		/* 83xx */
@@ -372,7 +372,7 @@ _GLOBAL(__save_cpu_setup)
 	andi.	r3,r3,0xff00
 	cmpwi	cr0,r3,0x0200
 	bne	1f
-	mfspr	r4,SPRN_HID2
+	mfspr	r4,SPRN_HID2_750FX
 	stw	r4,CS_HID2(r5)
 1:
 	mtcr	r7
@@ -467,7 +467,7 @@ _GLOBAL(__restore_cpu_setup)
 	bne	4f
 	lwz	r4,CS_HID2(r5)
 	rlwinm	r4,r4,0,19,17
-	mtspr	SPRN_HID2,r4
+	mtspr	SPRN_HID2_750FX,r4
 	sync
 4:
 	lwz	r4,CS_HID1(r5)
@@ -714,7 +714,7 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 	case SPRN_HID1:
 		to_book3s(vcpu)->hid[1] = spr_val;
 		break;
-	case SPRN_HID2:
+	case SPRN_HID2_750FX:
 		to_book3s(vcpu)->hid[2] = spr_val;
 		break;
 	case SPRN_HID2_GEKKO:
@@ -900,7 +900,7 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
 	case SPRN_HID1:
 		*spr_val = to_book3s(vcpu)->hid[1];
 		break;
-	case SPRN_HID2:
+	case SPRN_HID2_750FX:
 	case SPRN_HID2_GEKKO:
 		*spr_val = to_book3s(vcpu)->hid[2];
 		break;
@@ -199,7 +199,8 @@ lite5200_wakeup:

 	/* HIDs, MSR */
 	LOAD_SPRN(HID1, 0x19)
-	LOAD_SPRN(HID2, 0x1a)
+	/* FIXME: Should this use HID2_G2_LE? */
+	LOAD_SPRN(HID2_750FX, 0x1a)


 	/* address translation is tricky (see turn_on_mmu) */
@@ -279,7 +280,8 @@ save_regs:

 	SAVE_SPRN(HID0, 0x18)
 	SAVE_SPRN(HID1, 0x19)
-	SAVE_SPRN(HID2, 0x1a)
+	/* FIXME: Should this use HID2_G2_LE? */
+	SAVE_SPRN(HID2_750FX, 0x1a)
 	mfmsr	r10
 	stw	r10, (4*0x1b)(r4)
 	/*SAVE_SPRN(LR, 0x1c) have to save it before the call */
@@ -68,7 +68,8 @@ _GLOBAL(mpc83xx_enter_deep_sleep)

 	mfspr	r5, SPRN_HID0
 	mfspr	r6, SPRN_HID1
-	mfspr	r7, SPRN_HID2
+	/* FIXME: Should this use SPRN_HID2_G2_LE? */
+	mfspr	r7, SPRN_HID2_750FX

 	stw	r5, SS_HID+0(r3)
 	stw	r6, SS_HID+4(r3)
@@ -396,7 +397,8 @@ mpc83xx_deep_resume:

 	mtspr	SPRN_HID0, r5
 	mtspr	SPRN_HID1, r6
-	mtspr	SPRN_HID2, r7
+	/* FIXME: Should this use SPRN_HID2_G2_LE? */
+	mtspr	SPRN_HID2_750FX, r7

 	lwz	r4, SS_IABR+0(r3)
 	lwz	r5, SS_IABR+4(r3)
@@ -674,15 +674,17 @@
 #define MSR_AMD_CPPC_REQ		0xc00102b3
 #define MSR_AMD_CPPC_STATUS		0xc00102b4

-#define AMD_CPPC_LOWEST_PERF(x)		(((x) >> 0) & 0xff)
-#define AMD_CPPC_LOWNONLIN_PERF(x)	(((x) >> 8) & 0xff)
-#define AMD_CPPC_NOMINAL_PERF(x)	(((x) >> 16) & 0xff)
-#define AMD_CPPC_HIGHEST_PERF(x)	(((x) >> 24) & 0xff)
+/* Masks for use with MSR_AMD_CPPC_CAP1 */
+#define AMD_CPPC_LOWEST_PERF_MASK	GENMASK(7, 0)
+#define AMD_CPPC_LOWNONLIN_PERF_MASK	GENMASK(15, 8)
+#define AMD_CPPC_NOMINAL_PERF_MASK	GENMASK(23, 16)
+#define AMD_CPPC_HIGHEST_PERF_MASK	GENMASK(31, 24)

-#define AMD_CPPC_MAX_PERF(x)		(((x) & 0xff) << 0)
-#define AMD_CPPC_MIN_PERF(x)		(((x) & 0xff) << 8)
-#define AMD_CPPC_DES_PERF(x)		(((x) & 0xff) << 16)
-#define AMD_CPPC_ENERGY_PERF_PREF(x)	(((x) & 0xff) << 24)
+/* Masks for use with MSR_AMD_CPPC_REQ */
+#define AMD_CPPC_MAX_PERF_MASK		GENMASK(7, 0)
+#define AMD_CPPC_MIN_PERF_MASK		GENMASK(15, 8)
+#define AMD_CPPC_DES_PERF_MASK		GENMASK(23, 16)
+#define AMD_CPPC_EPP_PERF_MASK		GENMASK(31, 24)

 /* AMD Performance Counter Global Status and Control MSRs */
 #define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS	0xc0000300
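The hunk above swaps open-coded shift-and-mask accessors for GENMASK() masks read with FIELD_GET(). A standalone C sketch of the equivalence follows; the simplified GENMASK/FIELD_GET stand-ins only mirror the behavior of the real helpers in include/linux/bits.h and include/linux/bitfield.h and are not the kernel definitions:

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-ins for the kernel's GENMASK()/FIELD_GET(). */
	#define GENMASK(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
	/* Divide by the mask's lowest set bit to shift the field down. */
	#define FIELD_GET(mask, reg) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))

	#define AMD_CPPC_HIGHEST_PERF_MASK GENMASK(31, 24)

	int main(void)
	{
		uint64_t cap1 = 0xddccbbaaULL;	/* example CPPC_CAP1 value */

		/* Old style: explicit shift and 0xff mask. */
		unsigned hi_old = (cap1 >> 24) & 0xff;
		/* New style: the mask encodes both position and width. */
		unsigned hi_new = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1);

		printf("%#x %#x\n", hi_old, hi_new);	/* both print 0xdd */
		return 0;
	}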
@@ -4,6 +4,8 @@
  * Copyright (c) 2016, Intel Corporation.
  */

+#include <linux/bitfield.h>
+
 #include <acpi/cppc_acpi.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
@@ -154,7 +156,7 @@ int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
 		if (ret)
 			goto out;

-		val = AMD_CPPC_HIGHEST_PERF(val);
+		val = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, val);
 	} else {
 		ret = cppc_get_highest_perf(cpu, &val);
 		if (ret)
@@ -217,6 +217,20 @@ config CPUFREQ_DT

	  If in doubt, say N.

+config CPUFREQ_VIRT
+	tristate "Virtual cpufreq driver"
+	depends on GENERIC_ARCH_TOPOLOGY
+	help
+	  This adds a virtualized cpufreq driver for guest kernels that
+	  read/writes to a MMIO region for a virtualized cpufreq device to
+	  communicate with the host. It sends performance requests to the host
+	  which gets used as a hint to schedule vCPU threads and select CPU
+	  frequency. If a VM does not support a virtualized FIE such as AMUs,
+	  it updates the frequency scaling factor by polling host CPU frequency
+	  to enable accurate Per-Entity Load Tracking for tasks running in the guest.
+
+	  If in doubt, say N.
+
 config CPUFREQ_DT_PLATDEV
	tristate "Generic DT based cpufreq platdev driver"
	depends on OF
@@ -231,9 +245,7 @@ if X86
 source "drivers/cpufreq/Kconfig.x86"
 endif

-if ARM || ARM64
 source "drivers/cpufreq/Kconfig.arm"
-endif

 if PPC32 || PPC64
 source "drivers/cpufreq/Kconfig.powerpc"
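The CPUFREQ_VIRT help text above says the guest falls back to polling the host CPU frequency to keep its frequency scaling factor (and thus Per-Entity Load Tracking) accurate. As a rough sketch of that arithmetic only — the helper below is hypothetical and not part of the driver — the scale factor is the current frequency expressed in the kernel's SCHED_CAPACITY fixed point:

	#include <stdint.h>

	#define SCHED_CAPACITY_SHIFT	10	/* kernel convention: 1024 == 100% */

	/*
	 * Hypothetical helper: turn a polled current frequency and the CPU's
	 * maximum frequency into the scale factor consumed by load tracking.
	 */
	static inline uint64_t freq_scale(uint64_t cur_khz, uint64_t max_khz)
	{
		return (cur_khz << SCHED_CAPACITY_SHIFT) / max_khz;
	}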
@@ -17,13 +17,6 @@ config CPU_FREQ_CBE_PMI
	  frequencies. Using PMI, the processor will not only be able to run at
	  lower speed, but also at lower core voltage.

-config CPU_FREQ_MAPLE
-	bool "Support for Maple 970FX Evaluation Board"
-	depends on PPC_MAPLE
-	help
-	  This adds support for frequency switching on Maple 970FX
-	  Evaluation Board and compatible boards (IBM JS2x blades).
-
 config CPU_FREQ_PMAC
	bool "Support for Apple PowerBooks"
	depends on ADB_PMU && PPC32
@@ -16,6 +16,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET)	+= cpufreq_governor_attr_set.o

 obj-$(CONFIG_CPUFREQ_DT)		+= cpufreq-dt.o
 obj-$(CONFIG_CPUFREQ_DT_PLATDEV)	+= cpufreq-dt-platdev.o
+obj-$(CONFIG_CPUFREQ_VIRT)		+= virtual-cpufreq.o

 # Traces
 CFLAGS_amd-pstate-trace.o		:= -I$(src)
@@ -90,7 +91,6 @@ obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ)	+= vexpress-spc-cpufreq.o
 obj-$(CONFIG_CPU_FREQ_CBE)		+= ppc-cbe-cpufreq.o
 ppc-cbe-cpufreq-y			+= ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
 obj-$(CONFIG_CPU_FREQ_CBE_PMI)		+= ppc_cbe_cpufreq_pmi.o
-obj-$(CONFIG_CPU_FREQ_MAPLE)		+= maple-cpufreq.o
 obj-$(CONFIG_QORIQ_CPUFREQ)		+= qoriq-cpufreq.o
 obj-$(CONFIG_CPU_FREQ_PMAC)		+= pmac32-cpufreq.o
 obj-$(CONFIG_CPU_FREQ_PMAC64)		+= pmac64-cpufreq.o
@@ -73,20 +73,17 @@ static unsigned int acpi_pstate_strict;

 static bool boost_state(unsigned int cpu)
 {
-	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
-		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
-		msr = lo | ((u64)hi << 32);
+		rdmsrl_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
-		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-		msr = lo | ((u64)hi << 32);
+		rdmsrl_on_cpu(cpu, MSR_K7_HWCR, &msr);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
@@ -626,7 +623,14 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 #endif

 #ifdef CONFIG_ACPI_CPPC_LIB
-static u64 get_max_boost_ratio(unsigned int cpu)
+/*
+ * get_max_boost_ratio: Computes the max_boost_ratio as the ratio
+ * between the highest_perf and the nominal_perf.
+ *
+ * Returns the max_boost_ratio for @cpu. Returns the CPPC nominal
+ * frequency via @nominal_freq if it is non-NULL pointer.
+ */
+static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
 {
	struct cppc_perf_caps perf_caps;
	u64 highest_perf, nominal_perf;
@@ -655,6 +659,9 @@ static u64 get_max_boost_ratio(unsigned int cpu)

	nominal_perf = perf_caps.nominal_perf;

+	if (nominal_freq)
+		*nominal_freq = perf_caps.nominal_freq * 1000;
+
	if (!highest_perf || !nominal_perf) {
		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
		return 0;
@@ -667,8 +674,12 @@ static u64 get_max_boost_ratio(unsigned int cpu)

	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
 }
 #else
-static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
+{
+	return 0;
+}
 #endif

 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -678,9 +689,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
	struct acpi_cpufreq_data *data;
	unsigned int cpu = policy->cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	u64 max_boost_ratio, nominal_freq = 0;
	unsigned int valid_states = 0;
	unsigned int result = 0;
-	u64 max_boost_ratio;
	unsigned int i;
 #ifdef CONFIG_SMP
	static int blacklisted;
@@ -830,16 +841,20 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
	}
	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;

-	max_boost_ratio = get_max_boost_ratio(cpu);
+	max_boost_ratio = get_max_boost_ratio(cpu, &nominal_freq);
	if (max_boost_ratio) {
-		unsigned int freq = freq_table[0].frequency;
+		unsigned int freq = nominal_freq;

		/*
-		 * Because the loop above sorts the freq_table entries in the
-		 * descending order, freq is the maximum frequency in the table.
-		 * Assume that it corresponds to the CPPC nominal frequency and
-		 * use it to set cpuinfo.max_freq.
+		 * The loop above sorts the freq_table entries in the
+		 * descending order. If ACPI CPPC has not advertised
+		 * the nominal frequency (this is possible in CPPC
+		 * revisions prior to 3), then use the first entry in
+		 * the pstate table as a proxy for nominal frequency.
		 */
+		if (!freq)
+			freq = freq_table[0].frequency;
+
		policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
	} else {
		/*
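Taken together, the hunks above derive cpuinfo.max_freq by scaling the nominal frequency (CPPC-advertised, or taken from the first pstate-table entry) with the highest/nominal performance ratio carried in SCHED_CAPACITY_SHIFT fixed point. A worked C sketch with made-up CPPC values, mirroring the arithmetic in get_max_boost_ratio() and acpi_cpufreq_cpu_init():

	#include <stdint.h>
	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT	10

	int main(void)
	{
		uint64_t highest_perf = 228, nominal_perf = 163;	/* example caps */
		uint64_t nominal_freq = 3500000;			/* kHz */

		/* ratio in 10-bit fixed point, as in get_max_boost_ratio() */
		uint64_t ratio = (highest_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;

		/* cpuinfo.max_freq computation from acpi_cpufreq_cpu_init() */
		uint64_t max_freq = nominal_freq * ratio >> SCHED_CAPACITY_SHIFT;

		/* prints roughly 4.89 GHz for a 3.5 GHz nominal part */
		printf("ratio=%llu max_freq=%llu kHz\n",
		       (unsigned long long)ratio, (unsigned long long)max_freq);
		return 0;
	}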
@@ -891,8 +906,19 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
	policy->fast_switch_possible = !acpi_pstate_strict &&
		!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);

-	if (acpi_cpufreq_driver.set_boost)
-		set_boost(policy, acpi_cpufreq_driver.boost_enabled);
+	if (acpi_cpufreq_driver.set_boost) {
+		if (policy->boost_supported) {
+			/*
+			 * The firmware may have altered boost state while the
+			 * CPU was offline (for example during a suspend-resume
+			 * cycle).
+			 */
+			if (policy->boost_enabled != boost_state(cpu))
+				set_boost(policy, policy->boost_enabled);
+		} else {
+			policy->boost_supported = true;
+		}
+	}

	return result;
@@ -907,7 +933,7 @@ err_free:
	return result;
 }

-static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
	struct acpi_cpufreq_data *data = policy->driver_data;

@@ -920,8 +946,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
	free_cpumask_var(data->freqdomain_cpus);
	kfree(policy->freq_table);
	kfree(data);
-
-	return 0;
 }

 static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
@@ -946,7 +970,6 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 }

 static struct freq_attr *acpi_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
 #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	&cpb,
@@ -24,9 +24,9 @@

 TRACE_EVENT(amd_pstate_perf,

-	TP_PROTO(unsigned long min_perf,
-		 unsigned long target_perf,
-		 unsigned long capacity,
+	TP_PROTO(u8 min_perf,
+		 u8 target_perf,
+		 u8 capacity,
		 u64 freq,
		 u64 mperf,
		 u64 aperf,
@@ -47,9 +47,9 @@ TRACE_EVENT(amd_pstate_perf,
		),

	TP_STRUCT__entry(
-		__field(unsigned long, min_perf)
-		__field(unsigned long, target_perf)
-		__field(unsigned long, capacity)
+		__field(u8, min_perf)
+		__field(u8, target_perf)
+		__field(u8, capacity)
		__field(unsigned long long, freq)
		__field(unsigned long long, mperf)
		__field(unsigned long long, aperf)
@@ -70,10 +70,10 @@ TRACE_EVENT(amd_pstate_perf,
		__entry->fast_switch = fast_switch;
		),

-	TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
-		  (unsigned long)__entry->min_perf,
-		  (unsigned long)__entry->target_perf,
-		  (unsigned long)__entry->capacity,
+	TP_printk("amd_min_perf=%hhu amd_des_perf=%hhu amd_max_perf=%hhu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
+		  (u8)__entry->min_perf,
+		  (u8)__entry->target_perf,
+		  (u8)__entry->capacity,
		  (unsigned long long)__entry->freq,
		  (unsigned long long)__entry->mperf,
		  (unsigned long long)__entry->aperf,
@@ -86,11 +86,12 @@ TRACE_EVENT(amd_pstate_perf,
 TRACE_EVENT(amd_pstate_epp_perf,

	TP_PROTO(unsigned int cpu_id,
-		 unsigned int highest_perf,
-		 unsigned int epp,
-		 unsigned int min_perf,
-		 unsigned int max_perf,
-		 bool boost
+		 u8 highest_perf,
+		 u8 epp,
+		 u8 min_perf,
+		 u8 max_perf,
+		 bool boost,
+		 bool changed
		 ),

	TP_ARGS(cpu_id,
@@ -98,15 +99,17 @@ TRACE_EVENT(amd_pstate_epp_perf,
		epp,
		min_perf,
		max_perf,
-		boost),
+		boost,
+		changed),

	TP_STRUCT__entry(
		__field(unsigned int, cpu_id)
-		__field(unsigned int, highest_perf)
-		__field(unsigned int, epp)
-		__field(unsigned int, min_perf)
-		__field(unsigned int, max_perf)
+		__field(u8, highest_perf)
+		__field(u8, epp)
+		__field(u8, min_perf)
+		__field(u8, max_perf)
		__field(bool, boost)
+		__field(bool, changed)
		),

	TP_fast_assign(
@@ -116,15 +119,17 @@ TRACE_EVENT(amd_pstate_epp_perf,
		__entry->min_perf = min_perf;
		__entry->max_perf = max_perf;
		__entry->boost = boost;
+		__entry->changed = changed;
		),

-	TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u",
+	TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u, changed=%u",
		  (unsigned int)__entry->cpu_id,
-		  (unsigned int)__entry->min_perf,
-		  (unsigned int)__entry->max_perf,
-		  (unsigned int)__entry->highest_perf,
-		  (unsigned int)__entry->epp,
-		  (bool)__entry->boost
+		  (u8)__entry->min_perf,
+		  (u8)__entry->max_perf,
+		  (u8)__entry->highest_perf,
+		  (u8)__entry->epp,
+		  (bool)__entry->boost,
+		  (bool)__entry->changed
		  )
 );

@@ -22,39 +22,31 @@

 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

+#include <linux/bitfield.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/fs.h>
+#include <linux/cleanup.h>

 #include <acpi/cppc_acpi.h>

 #include "amd-pstate.h"

 /*
  * Abbreviations:
  * amd_pstate_ut: used as a shortform for AMD P-State unit test.
  * It helps to keep variable names smaller, simpler
  */
-enum amd_pstate_ut_result {
-	AMD_PSTATE_UT_RESULT_PASS,
-	AMD_PSTATE_UT_RESULT_FAIL,
-};
-
 struct amd_pstate_ut_struct {
	const char *name;
-	void (*func)(u32 index);
-	enum amd_pstate_ut_result result;
+	int (*func)(u32 index);
 };

 /*
  * Kernel module for testing the AMD P-State unit test
  */
-static void amd_pstate_ut_acpi_cpc_valid(u32 index);
-static void amd_pstate_ut_check_enabled(u32 index);
-static void amd_pstate_ut_check_perf(u32 index);
-static void amd_pstate_ut_check_freq(u32 index);
-static void amd_pstate_ut_check_driver(u32 index);
+static int amd_pstate_ut_acpi_cpc_valid(u32 index);
+static int amd_pstate_ut_check_enabled(u32 index);
+static int amd_pstate_ut_check_perf(u32 index);
+static int amd_pstate_ut_check_freq(u32 index);
+static int amd_pstate_ut_check_driver(u32 index);

 static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
	{"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid },
@@ -77,71 +69,67 @@ static bool get_shared_mem(void)
 /*
  * check the _CPC object is present in SBIOS.
  */
-static void amd_pstate_ut_acpi_cpc_valid(u32 index)
+static int amd_pstate_ut_acpi_cpc_valid(u32 index)
 {
-	if (acpi_cpc_valid())
-		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
-	else {
-		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+	if (!acpi_cpc_valid()) {
		pr_err("%s the _CPC object is not present in SBIOS!\n", __func__);
+		return -EINVAL;
	}
-}

-static void amd_pstate_ut_pstate_enable(u32 index)
-{
-	int ret = 0;
-	u64 cppc_enable = 0;
-
-	ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
-	if (ret) {
-		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
-		pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
-		return;
-	}
-	if (cppc_enable)
-		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
-	else {
-		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
-		pr_err("%s amd pstate must be enabled!\n", __func__);
-	}
+	return 0;
 }

 /*
  * check if amd pstate is enabled
  */
-static void amd_pstate_ut_check_enabled(u32 index)
+static int amd_pstate_ut_check_enabled(u32 index)
 {
+	u64 cppc_enable = 0;
+	int ret;
+
	if (get_shared_mem())
-		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
-	else
-		amd_pstate_ut_pstate_enable(index);
+		return 0;
+
+	ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
+	if (ret) {
+		pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
+		return ret;
+	}
+
+	if (!cppc_enable) {
+		pr_err("%s amd pstate must be enabled!\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
 }

 /*
  * check if performance values are reasonable.
  * highest_perf >= nominal_perf > lowest_nonlinear_perf > lowest_perf > 0
  */
-static void amd_pstate_ut_check_perf(u32 index)
+static int amd_pstate_ut_check_perf(u32 index)
 {
	int cpu = 0, ret = 0;
	u32 highest_perf = 0, nominal_perf = 0, lowest_nonlinear_perf = 0, lowest_perf = 0;
	u64 cap1 = 0;
	struct cppc_perf_caps cppc_perf;
-	struct cpufreq_policy *policy = NULL;
-	struct amd_cpudata *cpudata = NULL;
+	union perf_cached cur_perf;

-	for_each_possible_cpu(cpu) {
+	for_each_online_cpu(cpu) {
+		struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
+		struct amd_cpudata *cpudata;
+
		policy = cpufreq_cpu_get(cpu);
		if (!policy)
-			break;
+			continue;
		cpudata = policy->driver_data;

		if (get_shared_mem()) {
			ret = cppc_get_perf_caps(cpu, &cppc_perf);
			if (ret) {
-				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
				pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
-				goto skip_test;
+				return ret;
			}

			highest_perf = cppc_perf.highest_perf;
@@ -151,50 +139,44 @@ static void amd_pstate_ut_check_perf(u32 index)
		} else {
			ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
			if (ret) {
-				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
				pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
-				goto skip_test;
+				return ret;
			}

-			highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
-			nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1);
-			lowest_nonlinear_perf = AMD_CPPC_LOWNONLIN_PERF(cap1);
-			lowest_perf = AMD_CPPC_LOWEST_PERF(cap1);
+			highest_perf = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1);
+			nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
+			lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
+			lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
		}

-		if (highest_perf != READ_ONCE(cpudata->highest_perf) && !cpudata->hw_prefcore) {
+		cur_perf = READ_ONCE(cpudata->perf);
+		if (highest_perf != cur_perf.highest_perf && !cpudata->hw_prefcore) {
			pr_err("%s cpu%d highest=%d %d highest perf doesn't match\n",
-			       __func__, cpu, highest_perf, cpudata->highest_perf);
-			goto skip_test;
+			       __func__, cpu, highest_perf, cur_perf.highest_perf);
+			return -EINVAL;
		}
-		if ((nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
-		    (lowest_nonlinear_perf != READ_ONCE(cpudata->lowest_nonlinear_perf)) ||
-		    (lowest_perf != READ_ONCE(cpudata->lowest_perf))) {
-			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+		if (nominal_perf != cur_perf.nominal_perf ||
+		    (lowest_nonlinear_perf != cur_perf.lowest_nonlinear_perf) ||
+		    (lowest_perf != cur_perf.lowest_perf)) {
			pr_err("%s cpu%d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
-			       __func__, cpu, nominal_perf, cpudata->nominal_perf,
-			       lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
-			       lowest_perf, cpudata->lowest_perf);
-			goto skip_test;
+			       __func__, cpu, nominal_perf, cur_perf.nominal_perf,
+			       lowest_nonlinear_perf, cur_perf.lowest_nonlinear_perf,
+			       lowest_perf, cur_perf.lowest_perf);
+			return -EINVAL;
		}

		if (!((highest_perf >= nominal_perf) &&
		      (nominal_perf > lowest_nonlinear_perf) &&
-		      (lowest_nonlinear_perf > lowest_perf) &&
+		      (lowest_nonlinear_perf >= lowest_perf) &&
		      (lowest_perf > 0))) {
-			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
			       __func__, cpu, highest_perf, nominal_perf,
			       lowest_nonlinear_perf, lowest_perf);
-			goto skip_test;
+			return -EINVAL;
		}
-		cpufreq_cpu_put(policy);
	}

-	amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
-	return;
-skip_test:
-	cpufreq_cpu_put(policy);
+	return 0;
 }
@@ -202,59 +184,50 @@ skip_test:
  * max_freq >= nominal_freq > lowest_nonlinear_freq > min_freq > 0
  * check max freq when set support boost mode.
  */
-static void amd_pstate_ut_check_freq(u32 index)
+static int amd_pstate_ut_check_freq(u32 index)
 {
	int cpu = 0;
-	struct cpufreq_policy *policy = NULL;
-	struct amd_cpudata *cpudata = NULL;

-	for_each_possible_cpu(cpu) {
+	for_each_online_cpu(cpu) {
+		struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
+		struct amd_cpudata *cpudata;
+
		policy = cpufreq_cpu_get(cpu);
		if (!policy)
-			break;
+			continue;
		cpudata = policy->driver_data;

-		if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
+		if (!((policy->cpuinfo.max_freq >= cpudata->nominal_freq) &&
		      (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
-		      (cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
-		      (cpudata->min_freq > 0))) {
-			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+		      (cpudata->lowest_nonlinear_freq >= policy->cpuinfo.min_freq) &&
+		      (policy->cpuinfo.min_freq > 0))) {
			pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
-			       __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
-			       cpudata->lowest_nonlinear_freq, cpudata->min_freq);
-			goto skip_test;
+			       __func__, cpu, policy->cpuinfo.max_freq, cpudata->nominal_freq,
+			       cpudata->lowest_nonlinear_freq, policy->cpuinfo.min_freq);
+			return -EINVAL;
		}

		if (cpudata->lowest_nonlinear_freq != policy->min) {
-			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d cpudata_lowest_nonlinear_freq=%d policy_min=%d, they should be equal!\n",
			       __func__, cpu, cpudata->lowest_nonlinear_freq, policy->min);
-			goto skip_test;
+			return -EINVAL;
		}

		if (cpudata->boost_supported) {
-			if ((policy->max == cpudata->max_freq) ||
-			    (policy->max == cpudata->nominal_freq))
-				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
-			else {
-				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+			if ((policy->max != policy->cpuinfo.max_freq) &&
+			    (policy->max != cpudata->nominal_freq)) {
				pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
-				       __func__, cpu, policy->max, cpudata->max_freq,
+				       __func__, cpu, policy->max, policy->cpuinfo.max_freq,
				       cpudata->nominal_freq);
-				goto skip_test;
+				return -EINVAL;
			}
		} else {
-			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d must support boost!\n", __func__, cpu);
-			goto skip_test;
+			return -EINVAL;
		}
-		cpufreq_cpu_put(policy);
	}

-	amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
-	return;
-skip_test:
-	cpufreq_cpu_put(policy);
+	return 0;
 }
@@ -266,32 +239,28 @@ static int amd_pstate_set_mode(enum amd_pstate_mode mode)
	return amd_pstate_update_status(mode_str, strlen(mode_str));
 }

-static void amd_pstate_ut_check_driver(u32 index)
+static int amd_pstate_ut_check_driver(u32 index)
 {
	enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE;
-	int ret;

	for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) {
-		ret = amd_pstate_set_mode(mode1);
+		int ret = amd_pstate_set_mode(mode1);
+
		if (ret)
-			goto out;
+			return ret;
		for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) {
			if (mode1 == mode2)
				continue;
			ret = amd_pstate_set_mode(mode2);
-			if (ret)
-				goto out;
+			if (ret) {
+				pr_err("%s: failed to update status for %s->%s\n", __func__,
+				       amd_pstate_get_mode_string(mode1),
+				       amd_pstate_get_mode_string(mode2));
+				return ret;
+			}
		}
	}
-out:
-	if (ret)
-		pr_warn("%s: failed to update status for %s->%s: %d\n", __func__,
-			amd_pstate_get_mode_string(mode1),
-			amd_pstate_get_mode_string(mode2), ret);
-
-	amd_pstate_ut_cases[index].result = ret ?
-					    AMD_PSTATE_UT_RESULT_FAIL :
-					    AMD_PSTATE_UT_RESULT_PASS;
+	return 0;
 }

 static int __init amd_pstate_ut_init(void)
@@ -299,16 +268,12 @@ static int __init amd_pstate_ut_init(void)
	u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);

	for (i = 0; i < arr_size; i++) {
-		amd_pstate_ut_cases[i].func(i);
-		switch (amd_pstate_ut_cases[i].result) {
-		case AMD_PSTATE_UT_RESULT_PASS:
+		int ret = amd_pstate_ut_cases[i].func(i);
+
+		if (ret)
+			pr_err("%-4d %-20s\t fail: %d!\n", i+1, amd_pstate_ut_cases[i].name, ret);
+		else
			pr_info("%-4d %-20s\t success!\n", i+1, amd_pstate_ut_cases[i].name);
-			break;
-		case AMD_PSTATE_UT_RESULT_FAIL:
-		default:
-			pr_info("%-4d %-20s\t fail!\n", i+1, amd_pstate_ut_cases[i].name);
-			break;
-		}
	}

	return 0;
[File diff suppressed because it is too large.]
@@ -13,6 +13,36 @@
 /*********************************************************************
  *                        AMD P-state INTERFACE                      *
  *********************************************************************/
+
+/**
+ * union perf_cached - A union to cache performance-related data.
+ * @highest_perf: the maximum performance an individual processor may reach,
+ *		  assuming ideal conditions
+ *		  For platforms that support the preferred core feature, the highest_perf value maybe
+ *		  configured to any value in the range 166-255 by the firmware (because the preferred
+ *		  core ranking is encoded in the highest_perf value). To maintain consistency across
+ *		  all platforms, we split the highest_perf and preferred core ranking values into
+ *		  cpudata->perf.highest_perf and cpudata->prefcore_ranking.
+ * @nominal_perf: the maximum sustained performance level of the processor,
+ *		  assuming ideal operating conditions
+ * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
+ *			   savings are achieved
+ * @lowest_perf: the absolute lowest performance level of the processor
+ * @min_limit_perf: Cached value of the performance corresponding to policy->min
+ * @max_limit_perf: Cached value of the performance corresponding to policy->max
+ */
+union perf_cached {
+	struct {
+		u8	highest_perf;
+		u8	nominal_perf;
+		u8	lowest_nonlinear_perf;
+		u8	lowest_perf;
+		u8	min_limit_perf;
+		u8	max_limit_perf;
+	};
+	u64	val;
+};
+
 /**
  * struct amd_aperf_mperf
  * @aperf: actual performance frequency clock count
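The union above deliberately overlays six u8 members with a single u64 (val) so a consistent snapshot of all cached perf values can be taken with one READ_ONCE(), as the unit-test hunks earlier in this merge do via READ_ONCE(cpudata->perf). A minimal userspace model of the pattern (sketch only; READ_ONCE() approximated with a volatile load):

	#include <stdint.h>

	union perf_cached {
		struct {
			uint8_t highest_perf;
			uint8_t nominal_perf;
			uint8_t lowest_nonlinear_perf;
			uint8_t lowest_perf;
			uint8_t min_limit_perf;
			uint8_t max_limit_perf;
		};
		uint64_t val;	/* all six fields fit in one machine word */
	};

	/* Reader copies the whole word once, so the fields can't be torn. */
	static inline union perf_cached perf_snapshot(const union perf_cached *p)
	{
		union perf_cached snap;

		snap.val = *(const volatile uint64_t *)&p->val; /* ~READ_ONCE() */
		return snap;
	}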
@@ -30,24 +60,11 @@ struct amd_aperf_mperf {
 * @cpu: CPU number
 * @req: constraint request to apply
 * @cppc_req_cached: cached performance request hints
- * @highest_perf: the maximum performance an individual processor may reach,
- *		  assuming ideal conditions
- *		  For platforms that do not support the preferred core feature, the
- *		  highest_pef may be configured with 166 or 255, to avoid max frequency
- *		  calculated wrongly. we take the fixed value as the highest_perf.
- * @nominal_perf: the maximum sustained performance level of the processor,
- *		  assuming ideal operating conditions
- * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
- *			   savings are achieved
- * @lowest_perf: the absolute lowest performance level of the processor
+ * @perf: cached performance-related data
 * @prefcore_ranking: the preferred core ranking, the higher value indicates a higher
 *		      priority.
- * @min_limit_perf: Cached value of the performance corresponding to policy->min
- * @max_limit_perf: Cached value of the performance corresponding to policy->max
 * @min_limit_freq: Cached value of policy->min (in khz)
 * @max_limit_freq: Cached value of policy->max (in khz)
- * @max_freq: the frequency (in khz) that mapped to highest_perf
- * @min_freq: the frequency (in khz) that mapped to lowest_perf
 * @nominal_freq: the frequency (in khz) that mapped to nominal_perf
 * @lowest_nonlinear_freq: the frequency (in khz) that mapped to lowest_nonlinear_perf
 * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
@@ -59,7 +76,6 @@ struct amd_aperf_mperf {
 *	      AMD P-State driver supports preferred core featue.
 * @epp_cached: Cached CPPC energy-performance preference value
 * @policy: Cpufreq policy value
- * @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
 *
 * The amd_cpudata is key private data for each CPU thread in AMD P-State, and
 * represents all the attributes and goals that AMD P-State requests at runtime.
@@ -70,18 +86,11 @@ struct amd_cpudata {
	struct	freq_qos_request req[2];
	u64	cppc_req_cached;

-	u32	highest_perf;
-	u32	nominal_perf;
-	u32	lowest_nonlinear_perf;
-	u32	lowest_perf;
-	u32	prefcore_ranking;
-	u32	min_limit_perf;
-	u32	max_limit_perf;
-	u32	min_limit_freq;
-	u32	max_limit_freq;
+	union perf_cached perf;

-	u32	max_freq;
-	u32	min_freq;
+	u8	prefcore_ranking;
+
+	u32	min_limit_freq;
+	u32	max_limit_freq;
	u32	nominal_freq;
	u32	lowest_nonlinear_freq;

@@ -93,11 +102,9 @@ struct amd_cpudata {
	bool	hw_prefcore;

	/* EPP feature related attributes*/
-	s16	epp_cached;
	u32	policy;
-	u64	cppc_cap1_cached;
	bool	suspended;
-	s16	epp_default;
+	u8	epp_default;
 };

 /*
@@ -40,7 +40,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
 {
	int cpu;

-	for_each_possible_cpu(cpu) {
+	for_each_present_cpu(cpu) {
		struct device *cpu_dev;
		struct clk *clk;

@@ -121,11 +121,9 @@ static int bmips_cpufreq_target_index(struct cpufreq_policy *policy,
	return 0;
 }

-static int bmips_cpufreq_exit(struct cpufreq_policy *policy)
+static void bmips_cpufreq_exit(struct cpufreq_policy *policy)
 {
	kfree(policy->freq_table);
-
-	return 0;
 }

 static int bmips_cpufreq_init(struct cpufreq_policy *policy)
@@ -152,7 +150,6 @@ static struct cpufreq_driver bmips_cpufreq_driver = {
	.get		= bmips_cpufreq_get,
	.init		= bmips_cpufreq_init,
	.exit		= bmips_cpufreq_exit,
-	.attr		= cpufreq_generic_attr,
	.name		= BMIPS_CPUFREQ_PREFIX,
 };

@@ -474,14 +474,19 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
	rc = brcm_avs_get_pmap(priv, NULL);
	magic = readl(priv->base + AVS_MBOX_MAGIC);

-	return (magic == AVS_FIRMWARE_MAGIC) && ((rc != -ENOTSUPP) ||
-		(rc != -EINVAL));
+	return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP) &&
+		(rc != -EINVAL);
 }

 static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
 {
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-	struct private_data *priv = policy->driver_data;
+	struct private_data *priv;
+
+	if (!policy)
+		return 0;
+
+	priv = policy->driver_data;

	cpufreq_cpu_put(policy);

@@ -715,7 +720,6 @@ cpufreq_freq_attr_ro(brcm_avs_voltage);
 cpufreq_freq_attr_ro(brcm_avs_frequency);

 static struct freq_attr *brcm_avs_cpufreq_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
	&brcm_avs_pstate,
	&brcm_avs_mode,
	&brcm_avs_pmap,
@@ -34,8 +34,6 @@
 */
 static LIST_HEAD(cpu_data_list);

-static bool boost_supported;
-
 static struct cpufreq_driver cppc_cpufreq_driver;

 #ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
@@ -208,9 +206,9 @@ static void __init cppc_freq_invariance_init(void)
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
-		.sched_runtime	= 1000000,
-		.sched_deadline = 10000000,
-		.sched_period	= 10000000,
+		.sched_runtime	= NSEC_PER_MSEC,
+		.sched_deadline = 10 * NSEC_PER_MSEC,
+		.sched_period	= 10 * NSEC_PER_MSEC,
	};
	int ret;

@@ -275,15 +273,10 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
	struct cppc_cpudata *cpu_data = policy->driver_data;
	unsigned int cpu = policy->cpu;
	struct cpufreq_freqs freqs;
-	u32 desired_perf;
	int ret = 0;

-	desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
-	/* Return if it is exactly the same perf */
-	if (desired_perf == cpu_data->perf_ctrls.desired_perf)
-		return ret;
-
-	cpu_data->perf_ctrls.desired_perf = desired_perf;
+	cpu_data->perf_ctrls.desired_perf =
+		cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
	freqs.old = policy->cur;
	freqs.new = target_freq;

@@ -409,6 +402,9 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
	struct cppc_cpudata *cpu_data;

+	policy = cpufreq_cpu_get_raw(cpu_dev->id);
+	if (!policy)
+		return -EINVAL;
+
	cpu_data = policy->driver_data;
	perf_caps = &cpu_data->perf_caps;
	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
@@ -476,6 +472,9 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
	int step;

+	policy = cpufreq_cpu_get_raw(cpu_dev->id);
+	if (!policy)
+		return -EINVAL;
+
	cpu_data = policy->driver_data;
	perf_caps = &cpu_data->perf_caps;
	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
@@ -610,7 +609,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
	 */
	policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf);
-	policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
+	policy->max = cppc_perf_to_khz(caps, policy->boost_enabled ?
+					caps->highest_perf : caps->nominal_perf);

	/*
	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
@@ -618,7 +618,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
	 * nonlinear perf
	 */
	policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf);
-	policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->nominal_perf);
+	policy->cpuinfo.max_freq = policy->max;

	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
	policy->shared_type = cpu_data->shared_type;
@@ -651,7 +651,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
	 * is supported.
	 */
	if (caps->highest_perf > caps->nominal_perf)
-		boost_supported = true;
+		policy->boost_supported = true;

	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
@@ -672,7 +672,7 @@ out:
	return ret;
 }

-static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
@@ -689,7 +689,6 @@ static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
			 caps->lowest_perf, cpu, ret);

	cppc_cpufreq_put_cpu_data(policy);
-	return 0;
 }

 static inline u64 get_delta(u64 t1, u64 t0)
@@ -748,7 +747,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
	int ret;

	if (!policy)
-		return -ENODEV;
+		return 0;

	cpu_data = policy->driver_data;

@@ -790,11 +789,6 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	int ret;

-	if (!boost_supported) {
-		pr_err("BOOST not supported by CPU or firmware\n");
-		return -EINVAL;
-	}
-
	if (state)
		policy->max = cppc_perf_to_khz(caps, caps->highest_perf);
	else
@@ -104,6 +104,9 @@ static const struct of_device_id allowlist[] __initconst = {
 */
 static const struct of_device_id blocklist[] __initconst = {
	{ .compatible = "allwinner,sun50i-h6", },
+	{ .compatible = "allwinner,sun50i-h616", },
+	{ .compatible = "allwinner,sun50i-h618", },
+	{ .compatible = "allwinner,sun50i-h700", },

	{ .compatible = "arm,vexpress", },

@@ -165,6 +168,7 @@ static const struct of_device_id blocklist[] __initconst = {
	{ .compatible = "qcom,sm8350", },
	{ .compatible = "qcom,sm8450", },
	{ .compatible = "qcom,sm8550", },
+	{ .compatible = "qcom,sm8650", },

	{ .compatible = "st,stih407", },
	{ .compatible = "st,stih410", },
@@ -191,19 +195,18 @@ static const struct of_device_id blocklist[] __initconst = {

 static bool __init cpu0_node_has_opp_v2_prop(void)
 {
-	struct device_node *np = of_cpu_device_node_get(0);
+	struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
	bool ret = false;

	if (of_property_present(np, "operating-points-v2"))
		ret = true;

-	of_node_put(np);
	return ret;
 }

 static int __init cpufreq_dt_platdev_init(void)
 {
-	struct device_node *np = of_find_node_by_path("/");
+	struct device_node *np __free(device_node) = of_find_node_by_path("/");
	const struct of_device_id *match;
	const void *data = NULL;

@@ -219,14 +222,13 @@ static int __init cpufreq_dt_platdev_init(void)
	if (cpu0_node_has_opp_v2_prop() && !of_match_node(blocklist, np))
		goto create_pdev;

-	of_node_put(np);
	return -ENODEV;

 create_pdev:
-	of_node_put(np);
	return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt",
							     -1, data,
							     sizeof(struct cpufreq_dt_platform_data)));
 }
 core_initcall(cpufreq_dt_platdev_init);
 MODULE_DESCRIPTION("Generic DT based cpufreq platdev driver");
 MODULE_LICENSE("GPL");
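The platdev hunks above drop the manual of_node_put() calls by declaring the node pointer with __free(device_node), the kernel's scope-based cleanup from include/linux/cleanup.h: the put runs automatically on every exit path when the pointer leaves scope. A simplified illustration of the underlying compiler mechanism (GCC/Clang cleanup attribute; the __free macro here is a toy stand-in, not the kernel's class-based version):

	#include <stdio.h>
	#include <stdlib.h>

	/* Toy model of the machinery behind the kernel's __free(). */
	static void free_buf(char **p)
	{
		free(*p);	/* invoked automatically at end of scope */
	}
	#define __free(fn) __attribute__((cleanup(fn)))

	int main(void)
	{
		char *buf __free(free_buf) = malloc(32);

		if (!buf)
			return 1;	/* no leak: cleanup runs here too */

		snprintf(buf, 32, "released on scope exit");
		puts(buf);
		return 0;	/* compiler emits free_buf(&buf) here */
	}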
@@ -36,12 +36,6 @@ struct private_data {

 static LIST_HEAD(priv_list);

-static struct freq_attr *cpufreq_dt_attr[] = {
-	&cpufreq_freq_attr_scaling_available_freqs,
-	NULL,   /* Extra space for boost-attr if required */
-	NULL,
-};
-
 static struct private_data *cpufreq_dt_find_data(int cpu)
 {
	struct private_data *priv;

@@ -68,36 +62,22 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
 */
 static const char *find_supply_name(struct device *dev)
 {
-	struct device_node *np;
-	struct property *pp;
+	struct device_node *np __free(device_node) = of_node_get(dev->of_node);
	int cpu = dev->id;
-	const char *name = NULL;
-
-	np = of_node_get(dev->of_node);

	/* This must be valid for sure */
	if (WARN_ON(!np))
		return NULL;

	/* Try "cpu0" for older DTs */
-	if (!cpu) {
-		pp = of_find_property(np, "cpu0-supply", NULL);
-		if (pp) {
-			name = "cpu0";
-			goto node_put;
-		}
-	}
+	if (!cpu && of_property_present(np, "cpu0-supply"))
+		return "cpu0";

-	pp = of_find_property(np, "cpu-supply", NULL);
-	if (pp) {
-		name = "cpu";
-		goto node_put;
-	}
+	if (of_property_present(np, "cpu-supply"))
+		return "cpu";

	dev_dbg(dev, "no regulator for cpu%d\n", cpu);
-node_put:
-	of_node_put(np);
-	return name;
+	return NULL;
 }

 static int cpufreq_init(struct cpufreq_policy *policy)
@@ -134,23 +114,9 @@ static int cpufreq_init(struct cpufreq_policy *policy)
	policy->cpuinfo.transition_latency = transition_latency;
	policy->dvfs_possible_from_any_cpu = true;

-	/* Support turbo/boost mode */
-	if (policy_has_boost_freq(policy)) {
-		/* This gets disabled by core on driver unregister */
-		ret = cpufreq_enable_boost_support();
-		if (ret)
-			goto out_clk_put;
-		cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
-	}
-
	dev_pm_opp_of_register_em(cpu_dev, policy->cpus);

	return 0;
-
-out_clk_put:
-	clk_put(cpu_clk);
-
-	return ret;
 }

 static int cpufreq_online(struct cpufreq_policy *policy)
@@ -168,10 +134,9 @@ static int cpufreq_offline(struct cpufreq_policy *policy)
	return 0;
 }

-static int cpufreq_exit(struct cpufreq_policy *policy)
+static void cpufreq_exit(struct cpufreq_policy *policy)
 {
	clk_put(policy->clk);
-	return 0;
 }

 static struct cpufreq_driver dt_cpufreq_driver = {
@@ -185,7 +150,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
	.online = cpufreq_online,
	.offline = cpufreq_offline,
	.name = "cpufreq-dt",
-	.attr = cpufreq_dt_attr,
+	.set_boost = cpufreq_boost_set_sw,
	.suspend = cpufreq_generic_suspend,
 };

@@ -319,7 +284,7 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
	int ret, cpu;

	/* Request resources early so we can return in case of -EPROBE_DEFER */
-	for_each_possible_cpu(cpu) {
+	for_each_present_cpu(cpu) {
		ret = dt_cpufreq_early_init(&pdev->dev, cpu);
		if (ret)
			goto err;
|
@ -359,11 +359,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int nforce2_cpu_exit(struct cpufreq_policy *policy)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct cpufreq_driver nforce2_driver = {
|
||||
.name = "nforce2",
|
||||
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
|
||||
|
@ -371,7 +366,6 @@ static struct cpufreq_driver nforce2_driver = {
|
|||
.target = nforce2_target,
|
||||
.get = nforce2_get,
|
||||
.init = nforce2_cpu_init,
|
||||
.exit = nforce2_cpu_exit,
|
||||
};
|
||||
|
||||
#ifdef MODULE
|
||||
|
|
|
@@ -25,6 +25,7 @@
 #include <linux/mutex.h>
 #include <linux/pm_qos.h>
 #include <linux/slab.h>
+#include <linux/string_choices.h>
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/tick.h>
@@ -534,16 +535,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
 EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

 static unsigned int __resolve_freq(struct cpufreq_policy *policy,
-		unsigned int target_freq, unsigned int relation)
+				   unsigned int target_freq,
+				   unsigned int min, unsigned int max,
+				   unsigned int relation)
 {
	unsigned int idx;

-	target_freq = clamp_val(target_freq, policy->min, policy->max);
+	target_freq = clamp_val(target_freq, min, max);

	if (!policy->freq_table)
		return target_freq;

-	idx = cpufreq_frequency_table_target(policy, target_freq, relation);
+	idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
	policy->cached_resolved_idx = idx;
	policy->cached_target_freq = target_freq;
	return policy->freq_table[idx].frequency;
@@ -563,7 +566,21 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
 {
-	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
+	unsigned int min = READ_ONCE(policy->min);
+	unsigned int max = READ_ONCE(policy->max);
+
+	/*
+	 * If this function runs in parallel with cpufreq_set_policy(), it may
+	 * read policy->min before the update and policy->max after the update
+	 * or the other way around, so there is no ordering guarantee.
+	 *
+	 * Resolve this by always honoring the max (in case it comes from
+	 * thermal throttling or similar).
+	 */
+	if (unlikely(min > max))
+		min = max;
+
+	return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);

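The comment in cpufreq_driver_resolve_freq() above documents a deliberately tolerated race: min and max are snapshotted without locking, so a concurrent cpufreq_set_policy() can leave the local copies crossed, and the code resolves that by always honoring max. A condensed standalone model of the guard (sketch only, not the kernel function):

	static unsigned int resolve_clamped(unsigned int target,
					    unsigned int min, unsigned int max)
	{
		/* A racing policy update may have made min > max; honor max,
		 * since it can reflect thermal throttling. */
		if (min > max)
			min = max;

		if (target < min)
			target = min;
		if (target > max)
			target = max;

		return target;
	}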
@@ -599,26 +616,25 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
 static ssize_t show_boost(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+	return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled);
 }

 static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
 {
-	int ret, enable;
+	bool enable;

-	ret = sscanf(buf, "%d", &enable);
-	if (ret != 1 || enable < 0 || enable > 1)
+	if (kstrtobool(buf, &enable))
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
-		       __func__, enable ? "enable" : "disable");
+		       __func__, str_enable_disable(enable));
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
-		 __func__, enable ? "enabled" : "disabled");
+		 __func__, str_enabled_disabled(enable));

	return count;
 }
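The store_boost() conversion above replaces sscanf() plus a manual 0/1 range check with kstrtobool(), which also accepts spellings such as "y"/"n" and "on"/"off". A sketch of the resulting handler shape (kernel context assumed; the helper below is illustrative, not a function from the patch):

	/* Parse a boolean sysfs write and update a flag; returns bytes
	 * consumed on success, -EINVAL on a malformed value. */
	static ssize_t store_flag(const char *buf, size_t count, bool *flag)
	{
		bool enable;

		if (kstrtobool(buf, &enable))	/* "1"/"0", "y"/"n", "on"/"off" */
			return -EINVAL;

		*flag = enable;
		return count;
	}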
@@ -632,15 +648,18 @@ static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
 static ssize_t store_local_boost(struct cpufreq_policy *policy,
				 const char *buf, size_t count)
 {
-	int ret, enable;
+	int ret;
+	bool enable;

-	ret = kstrtoint(buf, 10, &enable);
-	if (ret || enable < 0 || enable > 1)
+	if (kstrtobool(buf, &enable))
		return -EINVAL;

	if (!cpufreq_driver->boost_enabled)
		return -EINVAL;

+	if (!policy->boost_supported)
+		return -EINVAL;
+
	if (policy->boost_enabled == enable)
		return count;

@@ -730,7 +749,7 @@ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
 static ssize_t show_##file_name				\
 (struct cpufreq_policy *policy, char *buf)		\
 {							\
-	return sprintf(buf, "%u\n", policy->object);	\
+	return sysfs_emit(buf, "%u\n", policy->object);	\
 }

 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
@@ -751,11 +770,11 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
-		ret = sprintf(buf, "%u\n", freq);
+		ret = sysfs_emit(buf, "%u\n", freq);
	else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
-		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
+		ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
-		ret = sprintf(buf, "%u\n", policy->cur);
+		ret = sysfs_emit(buf, "%u\n", policy->cur);
	return ret;
 }

@@ -789,9 +808,9 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
-		return sprintf(buf, "%u\n", cur_freq);
+		return sysfs_emit(buf, "%u\n", cur_freq);

-	return sprintf(buf, "<unknown>\n");
+	return sysfs_emit(buf, "<unknown>\n");
 }

 /*
@@ -800,12 +819,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
 {
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
-		return sprintf(buf, "powersave\n");
+		return sysfs_emit(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
-		return sprintf(buf, "performance\n");
+		return sysfs_emit(buf, "performance\n");
	else if (policy->governor)
-		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
-				policy->governor->name);
+		return sysfs_emit(buf, "%s\n", policy->governor->name);
	return -EINVAL;
 }

@@ -864,7 +882,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
	struct cpufreq_governor *t;

	if (!has_target()) {
-		i += sprintf(buf, "performance powersave");
+		i += sysfs_emit(buf, "performance powersave");
		goto out;
	}

@@ -873,11 +891,11 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			break;
-		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
+		i += sysfs_emit_at(buf, i, "%s ", t->name);
	}
	mutex_unlock(&cpufreq_governor_mutex);
 out:
-	i += sprintf(&buf[i], "\n");
+	i += sysfs_emit_at(buf, i, "\n");
	return i;
 }

@@ -887,7 +905,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
-		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu);
+		i += sysfs_emit_at(buf, i, "%u ", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
@@ -895,7 +913,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
	/* Remove the extra space at the end */
	i--;

-	i += sprintf(&buf[i], "\n");
+	i += sysfs_emit_at(buf, i, "\n");
	return i;
 }
 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
@@ -938,7 +956,7 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
 {
	if (!policy->governor || !policy->governor->show_setspeed)
-		return sprintf(buf, "<unsupported>\n");
+		return sysfs_emit(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
 }
@@ -952,8 +970,8 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
	int ret;
	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
	if (!ret)
-		return sprintf(buf, "%u\n", limit);
-	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
+		return sysfs_emit(buf, "%u\n", limit);
+	return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq);
 }

 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
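All of the show() conversions above follow one pattern: sprintf()/scnprintf() into the sysfs buffer becomes sysfs_emit()/sysfs_emit_at(), which check that the buffer is a page-aligned sysfs buffer and cap output at PAGE_SIZE. A minimal before/after sketch, matching the cpufreq show-handler signature used in the hunks:

	/* Before: relies purely on convention that buf holds a full page. */
	static ssize_t show_old(struct cpufreq_policy *policy, char *buf)
	{
		return sprintf(buf, "%u\n", policy->cur);
	}

	/* After: sysfs_emit() bounds the write to PAGE_SIZE and warns on
	 * misuse (e.g. a non page-aligned buffer). */
	static ssize_t show_new(struct cpufreq_policy *policy, char *buf)
	{
		return sysfs_emit(buf, "%u\n", policy->cur);
	}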
@@ -1081,6 +1099,21 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
	struct freq_attr **drv_attr;
	int ret = 0;

+	/* Attributes that need freq_table */
+	if (policy->freq_table) {
+		ret = sysfs_create_file(&policy->kobj,
+				&cpufreq_freq_attr_scaling_available_freqs.attr);
+		if (ret)
+			return ret;
+
+		if (cpufreq_boost_supported()) {
+			ret = sysfs_create_file(&policy->kobj,
+					&cpufreq_freq_attr_scaling_boost_freqs.attr);
+			if (ret)
+				return ret;
+		}
+	}
+
	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
@@ -1425,9 +1458,6 @@ static int cpufreq_online(unsigned int cpu)
			goto out_free_policy;
		}

-		/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
-		policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
-
		/*
		 * The initialization has succeeded and the policy is online.
		 * If there is a problem with its frequency table, take it
@@ -1490,6 +1520,10 @@ static int cpufreq_online(unsigned int cpu)

		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
+	} else {
+		ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+		if (ret < 0)
+			goto out_destroy_policy;
	}

	if (cpufreq_driver->get && has_target()) {
@@ -1535,7 +1569,7 @@ static int cpufreq_online(unsigned int cpu)
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
-			pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
+			pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
				__func__, policy->cpu, old_freq, policy->cur);
		}
	}
@@ -1583,6 +1617,19 @@ static int cpufreq_online(unsigned int cpu)
	if (cpufreq_thermal_control_enabled(cpufreq_driver))
		policy->cdev = of_cpufreq_cooling_register(policy);

+	/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+	if (cpufreq_driver->set_boost && policy->boost_supported &&
+	    policy->boost_enabled != cpufreq_boost_enabled()) {
+		policy->boost_enabled = cpufreq_boost_enabled();
+		ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
+		if (ret) {
+			/* If the set_boost fails, the online operation is not affected */
+			pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
+				str_enable_disable(policy->boost_enabled));
+			policy->boost_enabled = !policy->boost_enabled;
+		}
+	}
+
	pr_debug("initialization complete\n");

	return 0;
@ -2334,7 +2381,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
|
|||
if (cpufreq_disabled())
|
||||
return -ENODEV;
|
||||
|
||||
target_freq = __resolve_freq(policy, target_freq, relation);
|
||||
target_freq = __resolve_freq(policy, target_freq, policy->min,
|
||||
policy->max, relation);
|
||||
|
||||
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
|
||||
policy->cpu, target_freq, relation, old_target_freq);
|
||||
|
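The widened __resolve_freq() takes the limits as explicit arguments, so a single call clamps and snaps against one consistent snapshot instead of re-reading policy->min/max mid-operation. A runnable user-space sketch of that idea; clamp_to_table() and the table values are invented for illustration:

#include <stdio.h>

/* Clamp a request to [min, max], then snap it to the lowest table entry
 * at or above the clamped value -- roughly CPUFREQ_RELATION_L. */
static unsigned int clamp_to_table(const unsigned int *table, int len,
				   unsigned int freq,
				   unsigned int min, unsigned int max)
{
	int i;

	if (freq < min)
		freq = min;
	if (freq > max)
		freq = max;
	for (i = 0; i < len; i++)
		if (table[i] >= freq)
			return table[i];
	return table[len - 1];
}

int main(void)
{
	const unsigned int khz[] = { 800000, 1400000, 2000000 };

	/* A 1 GHz request with limits 900 MHz..2 GHz snaps to 1.4 GHz. */
	printf("%u\n", clamp_to_table(khz, 3, 1000000, 900000, 2000000));
	return 0;
}
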
@@ -2658,11 +2706,18 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
	 * Resolve policy min/max to available frequencies. It ensures
	 * no frequency resolution will neither overshoot the requested maximum
	 * nor undershoot the requested minimum.
	 *
	 * Avoid storing intermediate values in policy->max or policy->min and
	 * compiler optimizations around them because they may be accessed
	 * concurrently by cpufreq_driver_resolve_freq() during the update.
	 */
	policy->min = new_data.min;
	policy->max = new_data.max;
	policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
	policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
	WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
					       new_data.min, new_data.max,
					       CPUFREQ_RELATION_H));
	new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
				      new_data.max, CPUFREQ_RELATION_L);
	WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);

	trace_cpu_frequency_limits(policy);

	cpufreq_update_pressure(policy);

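WRITE_ONCE() here pairs with READ_ONCE() on the reader side: it keeps the compiler from tearing or re-materializing the store while another CPU resolves frequencies against the same policy. A minimal sketch of the pairing, with the struct and both helpers invented for illustration (the macros are the real ones from <linux/compiler.h>):

#include <linux/compiler.h>

struct limits {
	unsigned int min;
	unsigned int max;
};

/* Update path: publish the new limit with a single, untorn store. */
static void set_max(struct limits *l, unsigned int new_max)
{
	WRITE_ONCE(l->max, new_max);
}

/* Hot path on another CPU: sample the limit exactly once per decision. */
static unsigned int clamp_target(struct limits *l, unsigned int target)
{
	unsigned int max = READ_ONCE(l->max);

	return target > max ? max : target;
}
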
@@ -2775,7 +2830,7 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits);
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
	int ret;

@@ -2794,6 +2849,7 @@ static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_set_sw);

int cpufreq_boost_trigger_state(int state)
{

@@ -2810,6 +2866,9 @@ int cpufreq_boost_trigger_state(int state)

	cpus_read_lock();
	for_each_active_policy(policy) {
		if (!policy->boost_supported)
			continue;

		policy->boost_enabled = state;
		ret = cpufreq_driver->set_boost(policy, state);
		if (ret) {

@@ -2829,7 +2888,7 @@ err_reset_state:
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	pr_err("%s: Cannot %s BOOST\n",
	       __func__, state ? "enable" : "disable");
	       __func__, str_enable_disable(state));

	return ret;
}

@@ -2857,22 +2916,7 @@ static void remove_boost_sysfs_file(void)
	sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);

int cpufreq_boost_enabled(void)
bool cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}

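str_enable_disable() comes from <linux/string_choices.h> and just maps a bool onto "enable"/"disable", which keeps the full message grep-able in format strings. A user-space sketch of the same helper, for illustration only:

#include <stdbool.h>
#include <stdio.h>

static const char *str_enable_disable(bool v)
{
	return v ? "enable" : "disable";
}

int main(void)
{
	printf("Cannot %s BOOST\n", str_enable_disable(true));
	return 0;
}
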
@@ -145,7 +145,23 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
		time_elapsed = update_time - j_cdbs->prev_update_time;
		j_cdbs->prev_update_time = update_time;

		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		/*
		 * cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if
		 * it's obtained from get_cpu_idle_time_jiffy() when NOHZ is
		 * off, where idle_time is calculated by the difference between
		 * time elapsed in jiffies and "busy time" obtained from CPU
		 * statistics. If a CPU is 100% busy, the time elapsed and busy
		 * time should grow with the same amount in two consecutive
		 * samples, but in practice there could be a tiny difference,
		 * making the accumulated idle time decrease sometimes. Hence,
		 * in this case, idle_time should be regarded as 0 in order to
		 * make the further process correct.
		 */
		if (cur_idle_time > j_cdbs->prev_cpu_idle)
			idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		else
			idle_time = 0;

		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {

@@ -162,7 +178,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
			 * calls, so the previous load value can be used then.
			 */
			load = j_cdbs->prev_load;
		} else if (unlikely((int)idle_time > 2 * sampling_rate &&
		} else if (unlikely(idle_time > 2 * sampling_rate &&
			   j_cdbs->prev_load)) {
			/*
			 * If the CPU had gone completely idle and a task has

@@ -189,30 +205,15 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
			load = j_cdbs->prev_load;
			j_cdbs->prev_load = 0;
		} else {
			if (time_elapsed >= idle_time) {
			if (time_elapsed > idle_time)
				load = 100 * (time_elapsed - idle_time) / time_elapsed;
			} else {
				/*
				 * That can happen if idle_time is returned by
				 * get_cpu_idle_time_jiffy(). In that case
				 * idle_time is roughly equal to the difference
				 * between time_elapsed and "busy time" obtained
				 * from CPU statistics. Then, the "busy time"
				 * can end up being greater than time_elapsed
				 * (for example, if jiffies_64 and the CPU
				 * statistics are updated by different CPUs),
				 * so idle_time may in fact be negative. That
				 * means, though, that the CPU was busy all
				 * the time (on the rough average) during the
				 * last sampling interval and 100 can be
				 * returned as the load.
				 */
				load = (int)idle_time < 0 ? 100 : 0;
			}
			else
				load = 0;

			j_cdbs->prev_load = load;
		}

		if (unlikely((int)idle_time > 2 * sampling_rate)) {
		if (unlikely(idle_time > 2 * sampling_rate)) {
			unsigned int periods = idle_time / sampling_rate;

			if (periods < idle_periods)

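After the hunks above, idle_time can no longer go negative, so the load formula reduces to a simple guarded percentage. A runnable sketch of that arithmetic, with sample values invented for illustration:

#include <stdio.h>

/* Busy percentage over one sampling window, with the same guards the
 * governor now applies: zero load when the window was fully idle (or
 * the idle counter ran ahead), otherwise the busy fraction. */
static unsigned int window_load(unsigned long long elapsed_us,
				unsigned long long idle_us)
{
	if (idle_us >= elapsed_us)
		return 0;
	return 100 * (elapsed_us - idle_us) / elapsed_us;
}

int main(void)
{
	printf("%u%%\n", window_load(10000, 2500));	/* prints 75% */
	return 0;
}
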
@@ -77,7 +77,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		return freq_next;
	}

	index = cpufreq_frequency_table_target(policy, freq_next, relation);
	index = cpufreq_frequency_table_target(policy, freq_next, policy->min,
					       policy->max, relation);
	freq_req = freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

@@ -101,7 +101,6 @@ static struct cpufreq_driver davinci_driver = {
	.get		= cpufreq_generic_get,
	.init		= davinci_cpu_init,
	.name		= "davinci",
	.attr		= cpufreq_generic_attr,
};

static int __init davinci_cpufreq_probe(struct platform_device *pdev)

@@ -360,14 +360,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
	return 0;
}

static int eps_cpu_exit(struct cpufreq_policy *policy)
static void eps_cpu_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;

	/* Bye */
	kfree(eps_cpu[cpu]);
	eps_cpu[cpu] = NULL;
	return 0;
}

static struct cpufreq_driver eps_driver = {

@@ -377,7 +376,6 @@ static struct cpufreq_driver eps_driver = {
	.exit		= eps_cpu_exit,
	.get		= eps_get,
	.name		= "e_powersaver",
	.attr		= cpufreq_generic_attr,
};

@@ -194,7 +194,6 @@ static struct cpufreq_driver elanfreq_driver = {
	.target_index	= elanfreq_target,
	.init		= elanfreq_cpu_init,
	.name		= "elanfreq",
	.attr		= cpufreq_generic_attr,
};

static const struct x86_cpu_id elan_id[] = {

@@ -14,7 +14,7 @@
 *                   FREQUENCY TABLE HELPERS                         *
 *********************************************************************/

bool policy_has_boost_freq(struct cpufreq_policy *policy)
static bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *pos, *table = policy->freq_table;

@@ -27,7 +27,6 @@ bool policy_has_boost_freq(struct cpufreq_policy *policy)

	return false;
}
EXPORT_SYMBOL_GPL(policy_has_boost_freq);

int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table)

@@ -70,7 +69,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
				   struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	unsigned int freq, next_larger = ~0;
	unsigned int freq, prev_smaller = 0;
	bool found = false;

	pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",

@@ -86,12 +85,12 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
			break;
		}

		if ((next_larger > freq) && (freq > policy->max))
			next_larger = freq;
		if ((prev_smaller < freq) && (freq <= policy->max))
			prev_smaller = freq;
	}

	if (!found) {
		policy->max = next_larger;
		policy->max = prev_smaller;
		cpufreq_verify_within_cpu_limits(policy);
	}

@@ -116,8 +115,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);

int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation)
				 unsigned int target_freq, unsigned int min,
				 unsigned int max, unsigned int relation)
{
	struct cpufreq_frequency_table optimal = {
		.driver_data = ~0,

@@ -148,7 +147,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
	cpufreq_for_each_valid_entry_idx(pos, table, i) {
		freq = pos->frequency;

		if ((freq < policy->min) || (freq > policy->max))
		if (freq < min || freq > max)
			continue;
		if (freq == target_freq) {
			optimal.driver_data = i;

@@ -194,7 +193,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
	}
	if (optimal.driver_data > i) {
		if (suboptimal.driver_data > i) {
			WARN(1, "Invalid frequency table: %d\n", policy->cpu);
			WARN(1, "Invalid frequency table: %u\n", policy->cpu);
			return 0;
		}

@@ -254,7 +253,7 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
		if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ))
			continue;

		count += sprintf(&buf[count], "%d ", pos->frequency);
		count += sprintf(&buf[count], "%u ", pos->frequency);
	}
	count += sprintf(&buf[count], "\n");

@@ -367,6 +366,10 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
	if (ret)
		return ret;

	/* Driver's may have set this field already */
	if (policy_has_boost_freq(policy))
		policy->boost_supported = true;

	return set_freq_table_sorted(policy);
}

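The verify change above flips the fallback direction: if no table entry lies inside the requested limits, policy->max now snaps down to the largest frequency at or below the requested maximum rather than up to the next larger one. A runnable sketch of the new selection, with the table values invented:

#include <stdio.h>

static unsigned int snap_max(const unsigned int *freqs, int len,
			     unsigned int requested_max)
{
	unsigned int prev_smaller = 0;
	int i;

	/* Keep the largest table frequency that does not exceed the cap. */
	for (i = 0; i < len; i++)
		if (freqs[i] > prev_smaller && freqs[i] <= requested_max)
			prev_smaller = freqs[i];
	return prev_smaller;
}

int main(void)
{
	const unsigned int khz[] = { 800000, 1400000, 2000000 };

	printf("%u\n", snap_max(khz, 3, 1500000));	/* prints 1400000 */
	return 0;
}
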
@@ -205,7 +205,6 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
	.get		= cpufreq_generic_get,
	.init		= imx6q_cpufreq_init,
	.name		= "imx6q-cpufreq",
	.attr		= cpufreq_generic_attr,
	.suspend	= cpufreq_generic_suspend,
};

@@ -2212,7 +2212,7 @@ static int knl_get_turbo_pstate(int cpu)
static int hwp_get_cpu_scaling(int cpu)
{
	if (hybrid_scaling_factor) {
		struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
		struct cpuinfo_x86 *c = &cpu_data(cpu);
		u8 cpu_type = c->topo.intel_type;

		/*

@@ -2964,13 +2964,11 @@ static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
	return intel_cpufreq_cpu_offline(policy);
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
static void intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	pr_debug("CPU %d exiting\n", policy->cpu);

	policy->fast_switch_possible = false;

	return 0;
}

static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)

@@ -3301,7 +3299,7 @@ pstate_exit:
	return ret;
}

static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static void intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct freq_qos_request *req;

@@ -3311,7 +3309,7 @@ static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
	freq_qos_remove_request(req);
	kfree(req);

	return intel_pstate_cpu_exit(policy);
	intel_pstate_cpu_exit(policy);
}

static int intel_cpufreq_suspend(struct cpufreq_policy *policy)

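Many drivers in this series convert ->exit() from int to void, since the cpufreq core never acted on the return value. A sketch of the resulting callback shape, with the driver name and helper invented for illustration:

#include <linux/cpufreq.h>
#include <linux/slab.h>

/* Teardown cannot fail meaningfully; just release what init allocated. */
static void example_cpu_exit(struct cpufreq_policy *policy)
{
	kfree(policy->driver_data);
	policy->driver_data = NULL;
}

static struct cpufreq_driver example_driver = {
	.name	= "example",
	.exit	= example_cpu_exit,
	/* .init, .verify, .target_index etc. omitted from this sketch */
};
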
|
|
@ -96,7 +96,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
|
|||
.target_index = kirkwood_cpufreq_target,
|
||||
.init = kirkwood_cpufreq_cpu_init,
|
||||
.name = "kirkwood-cpufreq",
|
||||
.attr = cpufreq_generic_attr,
|
||||
};
|
||||
|
||||
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
|
||||
|
|
|
@@ -1,241 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Dmitry Eremin-Solenikov
 * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
 *
 * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
 * that is iMac G5 and latest single CPU desktop.
 */

#undef DEBUG

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/of.h>

#define DBG(fmt...) pr_debug(fmt)

/* see 970FX user manual */

#define SCOM_PCR 0x0aa001			/* PCR scom addr */

#define PCR_HILO_SELECT		0x80000000U	/* 1 = PCR, 0 = PCRH */
#define PCR_SPEED_FULL		0x00000000U	/* 1:1 speed value */
#define PCR_SPEED_HALF		0x00020000U	/* 1:2 speed value */
#define PCR_SPEED_QUARTER	0x00040000U	/* 1:4 speed value */
#define PCR_SPEED_MASK		0x000e0000U	/* speed mask */
#define PCR_SPEED_SHIFT		17
#define PCR_FREQ_REQ_VALID	0x00010000U	/* freq request valid */
#define PCR_VOLT_REQ_VALID	0x00008000U	/* volt request valid */
#define PCR_TARGET_TIME_MASK	0x00006000U	/* target time */
#define PCR_STATLAT_MASK	0x00001f00U	/* STATLAT value */
#define PCR_SNOOPLAT_MASK	0x000000f0U	/* SNOOPLAT value */
#define PCR_SNOOPACC_MASK	0x0000000fU	/* SNOOPACC value */

#define SCOM_PSR 0x408001			/* PSR scom addr */
/* warning: PSR is a 64 bits register */
#define PSR_CMD_RECEIVED	0x2000000000000000U	/* command received */
#define PSR_CMD_COMPLETED	0x1000000000000000U	/* command completed */
#define PSR_CUR_SPEED_MASK	0x0300000000000000U	/* current speed */
#define PSR_CUR_SPEED_SHIFT	(56)

/*
 * The G5 only supports two frequencies (Quarter speed is not supported)
 */
#define CPUFREQ_HIGH	0
#define CPUFREQ_LOW	1

static struct cpufreq_frequency_table maple_cpu_freqs[] = {
	{0, CPUFREQ_HIGH,	0},
	{0, CPUFREQ_LOW,	0},
	{0, 0,			CPUFREQ_TABLE_END},
};

/* Power mode data is an array of the 32 bits PCR values to use for
 * the various frequencies, retrieved from the device-tree
 */
static int maple_pmode_cur;

static const u32 *maple_pmode_data;
static int maple_pmode_max;

/*
 * SCOM based frequency switching for 970FX rev3
 */
static int maple_scom_switch_freq(int speed_mode)
{
	unsigned long flags;
	int to;

	local_irq_save(flags);

	/* Clear PCR high */
	scom970_write(SCOM_PCR, 0);
	/* Clear PCR low */
	scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
	/* Set PCR low */
	scom970_write(SCOM_PCR, PCR_HILO_SELECT |
		      maple_pmode_data[speed_mode]);

	/* Wait for completion */
	for (to = 0; to < 10; to++) {
		unsigned long psr = scom970_read(SCOM_PSR);

		if ((psr & PSR_CMD_RECEIVED) == 0 &&
		    (((psr >> PSR_CUR_SPEED_SHIFT) ^
		      (maple_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
		    == 0)
			break;
		if (psr & PSR_CMD_COMPLETED)
			break;
		udelay(100);
	}

	local_irq_restore(flags);

	maple_pmode_cur = speed_mode;
	ppc_proc_freq = maple_cpu_freqs[speed_mode].frequency * 1000ul;

	return 0;
}

static int maple_scom_query_freq(void)
{
	unsigned long psr = scom970_read(SCOM_PSR);
	int i;

	for (i = 0; i <= maple_pmode_max; i++)
		if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
		      (maple_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
			break;
	return i;
}

/*
 * Common interface to the cpufreq core
 */

static int maple_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int index)
{
	return maple_scom_switch_freq(index);
}

static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
{
	return maple_cpu_freqs[maple_pmode_cur].frequency;
}

static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	cpufreq_generic_init(policy, maple_cpu_freqs, 12000);
	return 0;
}

static struct cpufreq_driver maple_cpufreq_driver = {
	.name		= "maple",
	.flags		= CPUFREQ_CONST_LOOPS,
	.init		= maple_cpufreq_cpu_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= maple_cpufreq_target,
	.get		= maple_cpufreq_get_speed,
	.attr		= cpufreq_generic_attr,
};

static int __init maple_cpufreq_init(void)
{
	struct device_node *cpunode;
	unsigned int psize;
	unsigned long max_freq;
	const u32 *valp;
	u32 pvr_hi;
	int rc = -ENODEV;

	/*
	 * Behave here like powermac driver which checks machine compatibility
	 * to ease merging of two drivers in future.
	 */
	if (!of_machine_is_compatible("Momentum,Maple") &&
	    !of_machine_is_compatible("Momentum,Apache"))
		return 0;

	/* Get first CPU node */
	cpunode = of_cpu_device_node_get(0);
	if (cpunode == NULL) {
		pr_err("Can't find any CPU 0 node\n");
		goto bail_noprops;
	}

	/* Check 970FX for now */
	/* we actually don't care on which CPU to access PVR */
	pvr_hi = PVR_VER(mfspr(SPRN_PVR));
	if (pvr_hi != 0x3c && pvr_hi != 0x44) {
		pr_err("Unsupported CPU version (%x)\n", pvr_hi);
		goto bail_noprops;
	}

	/* Look for the powertune data in the device-tree */
	/*
	 * On Maple this property is provided by PIBS in dual-processor config,
	 * not provided by PIBS in CPU0 config and also not provided by SLOF,
	 * so YMMV
	 */
	maple_pmode_data = of_get_property(cpunode, "power-mode-data", &psize);
	if (!maple_pmode_data) {
		DBG("No power-mode-data !\n");
		goto bail_noprops;
	}
	maple_pmode_max = psize / sizeof(u32) - 1;

	/*
	 * From what I see, clock-frequency is always the maximal frequency.
	 * The current driver can not slew sysclk yet, so we really only deal
	 * with powertune steps for now. We also only implement full freq and
	 * half freq in this version. So far, I haven't yet seen a machine
	 * supporting anything else.
	 */
	valp = of_get_property(cpunode, "clock-frequency", NULL);
	if (!valp)
		goto bail_noprops;
	max_freq = (*valp)/1000;
	maple_cpu_freqs[0].frequency = max_freq;
	maple_cpu_freqs[1].frequency = max_freq/2;

	/* Force apply current frequency to make sure everything is in
	 * sync (voltage is right for example). Firmware may leave us with
	 * a strange setting ...
	 */
	msleep(10);
	maple_pmode_cur = -1;
	maple_scom_switch_freq(maple_scom_query_freq());

	pr_info("Registering Maple CPU frequency driver\n");
	pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
		maple_cpu_freqs[1].frequency/1000,
		maple_cpu_freqs[0].frequency/1000,
		maple_cpu_freqs[maple_pmode_cur].frequency/1000);

	rc = cpufreq_register_driver(&maple_cpufreq_driver);

bail_noprops:
	of_node_put(cpunode);

	return rc;
}

module_init(maple_cpufreq_init);

MODULE_LICENSE("GPL");

@@ -453,13 +453,11 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
	return 0;
}

static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
static void mtk_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct mtk_cpu_dvfs_info *info = policy->driver_data;

	dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);

	return 0;
}

static struct cpufreq_driver mtk_cpufreq_driver = {

@@ -472,7 +470,6 @@ static struct cpufreq_driver mtk_cpufreq_driver = {
	.init		= mtk_cpufreq_init,
	.exit		= mtk_cpufreq_exit,
	.name		= "mtk-cpufreq",
	.attr		= cpufreq_generic_attr,
};

static int mtk_cpufreq_probe(struct platform_device *pdev)

@@ -480,7 +477,7 @@ static int mtk_cpufreq_probe(struct platform_device *pdev)
	struct mtk_cpu_dvfs_info *info, *tmp;
	int cpu, ret;

	for_each_possible_cpu(cpu) {
	for_each_present_cpu(cpu) {
		info = mtk_cpu_dvfs_info_lookup(cpu);
		if (info)
			continue;

@@ -56,7 +56,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
	 * it), and registers the clock notifier that will take care
	 * of doing the PMSU part of a frequency transition.
	 */
	for_each_possible_cpu(cpu) {
	for_each_present_cpu(cpu) {
		struct device *cpu_dev;
		struct clk *clk;
		int ret;

@@ -28,9 +28,6 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include <asm/smp_plat.h>
#include <asm/cpu.h>

/* OPP tolerance in percentage */
#define OPP_TOLERANCE	4

@@ -136,11 +133,10 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
	return 0;
}

static int omap_cpu_exit(struct cpufreq_policy *policy)
static void omap_cpu_exit(struct cpufreq_policy *policy)
{
	freq_table_free();
	clk_put(policy->clk);
	return 0;
}

static struct cpufreq_driver omap_driver = {

@@ -151,7 +147,6 @@ static struct cpufreq_driver omap_driver = {
	.init		= omap_cpu_init,
	.exit		= omap_cpu_exit,
	.name		= "omap",
	.attr		= cpufreq_generic_attr,
};

static int omap_cpufreq_probe(struct platform_device *pdev)

@@ -227,7 +227,6 @@ static struct cpufreq_driver p4clockmod_driver = {
	.init		= cpufreq_p4_cpu_init,
	.get		= cpufreq_p4_get,
	.name		= "p4-clockmod",
	.attr		= cpufreq_generic_attr,
};

static const struct x86_cpu_id cpufreq_p4_id[] = {

@@ -204,21 +204,19 @@ out:
	return err;
}

static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static void pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	/*
	 * We don't support CPU hotplug. Don't unmap after the system
	 * has already made it to a running state.
	 */
	if (system_state >= SYSTEM_RUNNING)
		return 0;
		return;

	if (sdcasr_mapbase)
		iounmap(sdcasr_mapbase);
	if (sdcpwr_mapbase)
		iounmap(sdcpwr_mapbase);

	return 0;
}

static int pas_cpufreq_target(struct cpufreq_policy *policy,

@@ -247,7 +245,6 @@ static struct cpufreq_driver pas_cpufreq_driver = {
	.exit		= pas_cpufreq_cpu_exit,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= pas_cpufreq_target,
	.attr		= cpufreq_generic_attr,
};

/*

@@ -271,5 +268,6 @@ static void __exit pas_cpufreq_exit(void)
module_init(pas_cpufreq_init);
module_exit(pas_cpufreq_exit);

MODULE_DESCRIPTION("cpufreq driver for PA Semi PWRficient");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>, Olof Johansson <olof@lixom.net>");

@@ -562,18 +562,12 @@ out:
	return result;
}

static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	return 0;
}

static struct cpufreq_driver pcc_cpufreq_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.get		= pcc_get_freq,
	.verify		= pcc_cpufreq_verify,
	.target		= pcc_cpufreq_target,
	.init		= pcc_cpufreq_cpu_init,
	.exit		= pcc_cpufreq_cpu_exit,
	.name		= "pcc-cpufreq",
};

@@ -120,9 +120,9 @@ static int cpu_750fx_cpu_speed(int low_speed)

		/* tweak L2 for high voltage */
		if (has_cpu_l2lve) {
			hid2 = mfspr(SPRN_HID2);
			hid2 = mfspr(SPRN_HID2_750FX);
			hid2 &= ~0x2000;
			mtspr(SPRN_HID2, hid2);
			mtspr(SPRN_HID2_750FX, hid2);
		}
	}
#ifdef CONFIG_PPC_BOOK3S_32

@@ -131,9 +131,9 @@ static int cpu_750fx_cpu_speed(int low_speed)
	if (low_speed == 1) {
		/* tweak L2 for low voltage */
		if (has_cpu_l2lve) {
			hid2 = mfspr(SPRN_HID2);
			hid2 = mfspr(SPRN_HID2_750FX);
			hid2 |= 0x2000;
			mtspr(SPRN_HID2, hid2);
			mtspr(SPRN_HID2_750FX, hid2);
		}

		/* ramping down, set voltage last */

@@ -439,7 +439,6 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
	.suspend	= pmac_cpufreq_suspend,
	.resume		= pmac_cpufreq_resume,
	.flags		= CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
	.attr		= cpufreq_generic_attr,
	.name		= "powermac",
};

@@ -332,7 +332,6 @@ static struct cpufreq_driver g5_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= g5_cpufreq_target,
	.get		= g5_cpufreq_get_speed,
	.attr		= cpufreq_generic_attr,
};

@@ -505,7 +504,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
			continue;
		if (strcmp(loc, "CPU CLOCK"))
			continue;
		if (!of_get_property(hwclock, "platform-get-frequency", NULL))
		if (!of_property_present(hwclock, "platform-get-frequency"))
			continue;
		break;
	}

@@ -671,4 +670,5 @@ static int __init g5_cpufreq_init(void)
module_init(g5_cpufreq_init);

MODULE_DESCRIPTION("cpufreq driver for SMU & 970FX based G5 Macs");
MODULE_LICENSE("GPL");

@@ -219,7 +219,7 @@ have_busfreq:
}

static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
static void powernow_k6_cpu_exit(struct cpufreq_policy *policy)
{
	unsigned int i;

@@ -234,10 +234,9 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
			cpufreq_freq_transition_begin(policy, &freqs);
			powernow_k6_target(policy, i);
			cpufreq_freq_transition_end(policy, &freqs, 0);
			break;
			return;
		}
	}
	return 0;
}

static unsigned int powernow_k6_get(unsigned int cpu)

@@ -254,7 +253,6 @@ static struct cpufreq_driver powernow_k6_driver = {
	.exit		= powernow_k6_cpu_exit,
	.get		= powernow_k6_get,
	.name		= "powernow-k6",
	.attr		= cpufreq_generic_attr,
};

static const struct x86_cpu_id powernow_k6_ids[] = {

@@ -644,7 +644,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
	return 0;
}

static int powernow_cpu_exit(struct cpufreq_policy *policy)
static void powernow_cpu_exit(struct cpufreq_policy *policy)
{
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
	if (acpi_processor_perf) {

@@ -655,7 +655,6 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
#endif

	kfree(powernow_table);
	return 0;
}

static struct cpufreq_driver powernow_driver = {

@@ -668,7 +667,6 @@ static struct cpufreq_driver powernow_driver = {
	.init		= powernow_cpu_init,
	.exit		= powernow_cpu_exit,
	.name		= "powernow-k7",
	.attr		= cpufreq_generic_attr,
};

static int __init powernow_init(void)

@@ -1089,13 +1089,13 @@ err_out:
	return -ENODEV;
}

static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
static void powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
	int cpu;

	if (!data)
		return -EINVAL;
		return;

	powernow_k8_cpu_exit_acpi(data);

@@ -1104,8 +1104,6 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
	/* pol->cpus will be empty here, use related_cpus instead. */
	for_each_cpu(cpu, pol->related_cpus)
		per_cpu(powernow_data, cpu) = NULL;

	return 0;
}

static void query_values_on_cpu(void *_err)

@@ -1145,7 +1143,6 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
	.exit		= powernowk8_cpu_exit,
	.get		= powernowk8_get,
	.name		= "powernow-k8",
	.attr		= cpufreq_generic_attr,
};

static void __request_acpi_cpufreq(void)

@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/cpu.h>
#include <linux/hashtable.h>
#include <trace/events/power.h>

@@ -281,7 +282,7 @@ next:
	pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
		pstate_nominal, pstate_max);
	pr_info("Workload Optimized Frequency is %s in the platform\n",
		(powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
		str_enabled_disabled(powernv_pstate_info.wof_enabled));

	pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
	if (!pstate_ids) {

@@ -692,7 +693,7 @@ static void gpstate_timer_handler(struct timer_list *t)
	}

	/*
	 * If PMCR was last updated was using fast_swtich then
	 * If PMCR was last updated was using fast_switch then
	 * We may have wrong in gpstate->last_lpstate_idx
	 * value. Hence, read from PMCR to get correct data.
	 */

@@ -874,7 +875,7 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
	return 0;
}

static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static void powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct powernv_smp_call_data freq_data;
	struct global_pstate_info *gpstates = policy->driver_data;

@@ -886,8 +887,6 @@ static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
		del_timer_sync(&gpstates->timer);

	kfree(policy->driver_data);

	return 0;
}

static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,

@@ -1129,7 +1128,7 @@ static int __init powernv_cpufreq_init(void)
		goto out;

	if (powernv_pstate_info.wof_enabled)
		powernv_cpufreq_driver.boost_enabled = true;
		powernv_cpufreq_driver.set_boost = cpufreq_boost_set_sw;
	else
		powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;

@@ -1139,9 +1138,6 @@ static int __init powernv_cpufreq_init(void)
		goto cleanup;
	}

	if (powernv_pstate_info.wof_enabled)
		cpufreq_enable_boost_support();

	register_reboot_notifier(&powernv_cpufreq_reboot_nb);
	opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);

@@ -1162,5 +1158,6 @@ static void __exit powernv_cpufreq_exit(void)
}
module_exit(powernv_cpufreq_exit);

MODULE_DESCRIPTION("cpufreq driver for IBM/OpenPOWER powernv systems");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");

@@ -113,10 +113,9 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
	return 0;
}

static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static void cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	cbe_cpufreq_pmi_policy_exit(policy);
	return 0;
}

static int cbe_cpufreq_target(struct cpufreq_policy *policy,

@@ -169,5 +168,6 @@ static void __exit cbe_cpufreq_exit(void)
module_init(cbe_cpufreq_init);
module_exit(cbe_cpufreq_exit);

MODULE_DESCRIPTION("cpufreq driver for Cell BE processors");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");

@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>

@@ -142,14 +143,12 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
}

/* Get the frequency requested by the cpufreq core for the CPU */
static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
static unsigned int qcom_cpufreq_get_freq(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data;
	const struct qcom_cpufreq_soc_data *soc_data;
	struct cpufreq_policy *policy;
	unsigned int index;

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy)
		return 0;

@@ -162,12 +161,10 @@ static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
	return policy->freq_table[index].frequency;
}

static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
static unsigned int __qcom_cpufreq_hw_get(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data;
	struct cpufreq_policy *policy;

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy)
		return 0;

@@ -176,7 +173,12 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
	if (data->throttle_irq >= 0)
		return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;

	return qcom_cpufreq_get_freq(cpu);
	return qcom_cpufreq_get_freq(policy);
}

static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
	return __qcom_cpufreq_hw_get(cpufreq_cpu_get_raw(cpu));
}

static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,

@@ -304,7 +306,7 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
	struct of_phandle_args args;
	int cpu, ret;

	for_each_possible_cpu(cpu) {
	for_each_present_cpu(cpu) {
		cpu_np = of_cpu_device_node_get(cpu);
		if (!cpu_np)
			continue;

@@ -362,7 +364,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
	 * If h/w throttled frequency is higher than what cpufreq has requested
	 * for, then stop polling and switch back to interrupt mechanism.
	 */
	if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
	if (throttled_freq >= qcom_cpufreq_get_freq(cpufreq_cpu_get_raw(cpu)))
		enable_irq(data->throttle_irq);
	else
		mod_delayed_work(system_highpri_wq, &data->throttle_work,

@@ -440,7 +442,6 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
		return data->throttle_irq;

	data->cancel_throttle = false;
	data->policy = policy;

	mutex_init(&data->throttle_lock);
	INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);

@@ -551,6 +552,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)

	policy->driver_data = data;
	policy->dvfs_possible_from_any_cpu = true;
	data->policy = policy;

	ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
	if (ret) {

@@ -564,16 +566,10 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
		return -ENODEV;
	}

	if (policy_has_boost_freq(policy)) {
		ret = cpufreq_enable_boost_support();
		if (ret)
			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
	}

	return qcom_cpufreq_hw_lmh_init(policy, index);
}

static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
static void qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	struct qcom_cpufreq_data *data = policy->driver_data;

@@ -583,8 +579,6 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
	qcom_cpufreq_hw_lmh_exit(data);
	kfree(policy->freq_table);
	kfree(data);

	return 0;
}

static void qcom_cpufreq_ready(struct cpufreq_policy *policy)

@@ -595,12 +589,6 @@ static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
		enable_irq(data->throttle_irq);
}

static struct freq_attr *qcom_cpufreq_hw_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL
};

static struct cpufreq_driver cpufreq_qcom_hw_driver = {
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK |
			  CPUFREQ_HAVE_GOVERNOR_PER_POLICY |

@@ -615,19 +603,32 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
	.register_em	= cpufreq_register_em_with_opp,
	.fast_switch	= qcom_cpufreq_hw_fast_switch,
	.name		= "qcom-cpufreq-hw",
	.attr		= qcom_cpufreq_hw_attr,
	.ready		= qcom_cpufreq_ready,
	.set_boost	= cpufreq_boost_set_sw,
};

static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk);

	return qcom_lmh_get_throttle_freq(data);
	return __qcom_cpufreq_hw_get(data->policy) * HZ_PER_KHZ;
}

/*
 * Since we cannot determine the closest rate of the target rate, let's just
 * return the actual rate at which the clock is running at. This is needed to
 * make clk_set_rate() API work properly.
 */
static int qcom_cpufreq_hw_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	req->rate = qcom_cpufreq_hw_recalc_rate(hw, 0);

	return 0;
}

static const struct clk_ops qcom_cpufreq_hw_clk_ops = {
	.recalc_rate = qcom_cpufreq_hw_recalc_rate,
	.determine_rate = qcom_cpufreq_hw_determine_rate,
};

static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)

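The determine_rate hook added above simply reports the rate the clock is already running at, the usual trick for clocks whose rate is controlled elsewhere. A sketch of that pattern for a generic firmware-owned clock; read_current_rate_from_hw() and its fixed return value are invented for illustration:

#include <linux/clk-provider.h>

/* Hypothetical register read standing in for the real backend. */
static unsigned long read_current_rate_from_hw(struct clk_hw *hw)
{
	return 1000000000UL;
}

static unsigned long hw_clk_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	/* Rate is owned by firmware; read it back rather than deriving
	 * it from the parent clock. */
	return read_current_rate_from_hw(hw);
}

/* Cannot round arbitrary requests, so answer every request with the
 * actual running rate; clk_set_rate() then still succeeds. */
static int hw_clk_determine_rate(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	req->rate = hw_clk_recalc_rate(hw, 0);
	return 0;
}

static const struct clk_ops hw_clk_ops = {
	.recalc_rate = hw_clk_recalc_rate,
	.determine_rate = hw_clk_determine_rate,
};
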
|
@ -191,6 +191,7 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
|
|||
case QCOM_ID_IPQ5312:
|
||||
case QCOM_ID_IPQ5302:
|
||||
case QCOM_ID_IPQ5300:
|
||||
case QCOM_ID_IPQ5321:
|
||||
case QCOM_ID_IPQ9514:
|
||||
case QCOM_ID_IPQ9550:
|
||||
case QCOM_ID_IPQ9554:
|
||||
|
@ -455,7 +456,6 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
|
|||
{
|
||||
struct qcom_cpufreq_drv *drv;
|
||||
struct nvmem_cell *speedbin_nvmem;
|
||||
struct device_node *np;
|
||||
struct device *cpu_dev;
|
||||
char pvs_name_buffer[] = "speedXX-pvsXX-vXX";
|
||||
char *pvs_name = pvs_name_buffer;
|
||||
|
@ -467,16 +467,15 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
|
|||
if (!cpu_dev)
|
||||
return -ENODEV;
|
||||
|
||||
np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
|
||||
struct device_node *np __free(device_node) =
|
||||
dev_pm_opp_of_get_opp_desc_node(cpu_dev);
|
||||
if (!np)
|
||||
return -ENOENT;
|
||||
|
||||
ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu") ||
|
||||
of_device_is_compatible(np, "operating-points-v2-krait-cpu");
|
||||
if (!ret) {
|
||||
of_node_put(np);
|
||||
if (!ret)
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()),
|
||||
GFP_KERNEL);
|
||||
|
@ -502,9 +501,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
|
|||
}
|
||||
nvmem_cell_put(speedbin_nvmem);
|
||||
}
|
||||
of_node_put(np);
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
for_each_present_cpu(cpu) {
|
||||
struct device **virt_devs = NULL;
|
||||
struct dev_pm_opp_config config = {
|
||||
.supported_hw = NULL,
|
||||
|
@ -571,7 +569,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
|
|||
dev_err(cpu_dev, "Failed to register platform device\n");
|
||||
|
||||
free_opp:
|
||||
for_each_possible_cpu(cpu) {
|
||||
for_each_present_cpu(cpu) {
|
||||
qcom_cpufreq_put_virt_devs(drv, cpu);
|
||||
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
|
||||
}
|
||||
|
@ -585,7 +583,7 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
|
|||
|
||||
platform_device_unregister(cpufreq_dt_pdev);
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
for_each_present_cpu(cpu) {
|
||||
qcom_cpufreq_put_virt_devs(drv, cpu);
|
||||
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
|
||||
}
|
||||
|
@ -596,7 +594,7 @@ static int qcom_cpufreq_suspend(struct device *dev)
|
|||
struct qcom_cpufreq_drv *drv = dev_get_drvdata(dev);
|
||||
unsigned int cpu;
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
for_each_present_cpu(cpu)
|
||||
qcom_cpufreq_suspend_virt_devs(drv, cpu);
|
||||
|
||||
return 0;
|
||||
|
@ -613,7 +611,7 @@ static struct platform_driver qcom_cpufreq_driver = {
|
|||
},
|
||||
};
|
||||
|
||||
static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
|
||||
static const struct of_device_id qcom_cpufreq_match_list[] __initconst __maybe_unused = {
|
||||
{ .compatible = "qcom,apq8096", .data = &match_data_kryo },
|
||||
{ .compatible = "qcom,msm8909", .data = &match_data_msm8909 },
|
||||
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
|
||||
|
@ -638,7 +636,7 @@ MODULE_DEVICE_TABLE(of, qcom_cpufreq_match_list);
|
|||
*/
|
||||
static int __init qcom_cpufreq_init(void)
|
||||
{
|
||||
struct device_node *np = of_find_node_by_path("/");
|
||||
struct device_node *np __free(device_node) = of_find_node_by_path("/");
|
||||
const struct of_device_id *match;
|
||||
int ret;
|
||||
|
||||
|
@ -646,7 +644,6 @@ static int __init qcom_cpufreq_init(void)
|
|||
return -ENODEV;
|
||||
|
||||
match = of_match_node(qcom_cpufreq_match_list, np);
|
||||
of_node_put(np);
|
||||
if (!match)
|
||||
return -ENODEV;
|
||||
|
||||
|
|
|
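The __free(device_node) annotations rely on the scope-based cleanup helpers from <linux/cleanup.h>: of_node_put() runs automatically when the variable goes out of scope, which is why the error paths above can drop their explicit puts. A sketch of the pattern, with the compatible string invented for illustration:

#include <linux/cleanup.h>
#include <linux/of.h>

static int probe_root_compatible(void)
{
	/* of_node_put() runs automatically on every return below. */
	struct device_node *np __free(device_node) = of_find_node_by_path("/");

	if (!np)
		return -ENODEV;

	if (!of_device_is_compatible(np, "vendor,board"))	/* hypothetical */
		return -ENODEV;

	return 0;
}
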
@@ -225,7 +225,7 @@ err_np:
	return -ENODEV;
}

static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static void qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct cpu_data *data = policy->driver_data;

@@ -233,8 +233,6 @@ static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
	kfree(data->table);
	kfree(data);
	policy->driver_data = NULL;

	return 0;
}

static int qoriq_cpufreq_target(struct cpufreq_policy *policy,

@@ -256,7 +254,6 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= qoriq_cpufreq_target,
	.get		= cpufreq_generic_get,
	.attr		= cpufreq_generic_attr,
};

static const struct of_device_id qoriq_cpufreq_blacklist[] = {

@@ -24,6 +24,7 @@ struct s3c64xx_dvfs {
	unsigned int vddarm_max;
};

#ifdef CONFIG_REGULATOR
static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
	[0] = { 1000000, 1150000 },
	[1] = { 1050000, 1150000 },

@@ -31,6 +32,7 @@ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
	[3] = { 1200000, 1350000 },
	[4] = { 1300000, 1350000 },
};
#endif

static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
	{ 0, 0, 66000 },

@@ -51,15 +53,16 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
				      unsigned int index)
{
	struct s3c64xx_dvfs *dvfs;
	unsigned int old_freq, new_freq;
	unsigned int new_freq = s3c64xx_freq_table[index].frequency;
	int ret;

#ifdef CONFIG_REGULATOR
	struct s3c64xx_dvfs *dvfs;
	unsigned int old_freq;

	old_freq = clk_get_rate(policy->clk) / 1000;
	new_freq = s3c64xx_freq_table[index].frequency;
	dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];

#ifdef CONFIG_REGULATOR
	if (vddarm && new_freq > old_freq) {
		ret = regulator_set_voltage(vddarm,
					    dvfs->vddarm_min,

|
@ -92,7 +92,6 @@ static struct cpufreq_driver sc520_freq_driver = {
|
|||
.target_index = sc520_freq_target,
|
||||
.init = sc520_freq_cpu_init,
|
||||
.name = "sc520_freq",
|
||||
.attr = cpufreq_generic_attr,
|
||||
};
|
||||
|
||||
static const struct x86_cpu_id sc520_ids[] = {
|
||||
|
|
|
@@ -16,6 +16,7 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>

@@ -26,6 +27,8 @@ struct scmi_data {
	int nr_opp;
	struct device *cpu_dev;
	cpumask_var_t opp_shared_cpus;
	struct notifier_block limit_notify_nb;
	struct freq_qos_request limits_freq_req;
};

static struct scmi_protocol_handle *ph;

@@ -34,11 +37,17 @@ static struct cpufreq_driver scmi_cpufreq_driver;

static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
	struct scmi_data *priv = policy->driver_data;
	struct cpufreq_policy *policy;
	struct scmi_data *priv;
	unsigned long rate;
	int ret;

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	priv = policy->driver_data;

	ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
	if (ret)
		return 0;

@@ -63,9 +72,9 @@ static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct scmi_data *priv = policy->driver_data;
	unsigned long freq = target_freq;

	if (!perf_ops->freq_set(ph, priv->domain_id,
				target_freq * 1000, true))
	if (!perf_ops->freq_set(ph, priv->domain_id, freq * 1000, true))
		return target_freq;

	return 0;

@@ -101,7 +110,7 @@ scmi_get_sharing_cpus(struct device *cpu_dev, int domain,
	int cpu, tdomain;
	struct device *tcpu_dev;

	for_each_possible_cpu(cpu) {
	for_each_present_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

@@ -168,11 +177,21 @@ scmi_get_rate_limit(u32 domain, bool has_fast_switch)
	return rate_limit;
}

static struct freq_attr *scmi_cpufreq_hw_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
	NULL,
};
static int scmi_limit_notify_cb(struct notifier_block *nb, unsigned long event, void *data)
{
	struct scmi_data *priv = container_of(nb, struct scmi_data, limit_notify_nb);
	struct scmi_perf_limits_report *limit_notify = data;
	unsigned int limit_freq_khz;
	int ret;

	limit_freq_khz = limit_notify->range_max_freq / HZ_PER_KHZ;

	ret = freq_qos_update_request(&priv->limits_freq_req, limit_freq_khz);
	if (ret < 0)
		pr_warn("failed to update freq constraint: %d\n", ret);

	return NOTIFY_OK;
}

static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{

@@ -181,6 +200,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
	struct device *cpu_dev;
	struct scmi_data *priv;
	struct cpufreq_frequency_table *freq_table;
	struct scmi_device *sdev = cpufreq_get_driver_data();

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {

@@ -283,19 +303,27 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
	policy->transition_delay_us =
		scmi_get_rate_limit(domain, policy->fast_switch_possible);

	if (policy_has_boost_freq(policy)) {
		ret = cpufreq_enable_boost_support();
		if (ret) {
			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
			goto out_free_opp;
		} else {
			scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
			scmi_cpufreq_driver.boost_enabled = true;
		}
	ret = freq_qos_add_request(&policy->constraints, &priv->limits_freq_req, FREQ_QOS_MAX,
				   FREQ_QOS_MAX_DEFAULT_VALUE);
	if (ret < 0) {
		dev_err(cpu_dev, "failed to add qos limits request: %d\n", ret);
		goto out_free_table;
	}

	priv->limit_notify_nb.notifier_call = scmi_limit_notify_cb;
	ret = sdev->handle->notify_ops->event_notifier_register(sdev->handle, SCMI_PROTOCOL_PERF,
							SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
							&priv->domain_id,
							&priv->limit_notify_nb);
	if (ret)
		dev_warn(&sdev->dev,
			 "failed to register for limits change notifier for domain %d\n",
			 priv->domain_id);

	return 0;

out_free_table:
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_opp:
	dev_pm_opp_remove_all_dynamic(cpu_dev);

@@ -308,16 +336,20 @@ out_free_priv:
	return ret;
}

static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
static void scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct scmi_data *priv = policy->driver_data;
	struct scmi_device *sdev = cpufreq_get_driver_data();

	sdev->handle->notify_ops->event_notifier_unregister(sdev->handle, SCMI_PROTOCOL_PERF,
							SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
							&priv->domain_id,
							&priv->limit_notify_nb);
	freq_qos_remove_request(&priv->limits_freq_req);
	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
	free_cpumask_var(priv->opp_shared_cpus);
	kfree(priv);

	return 0;
}

static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)

@@ -352,13 +384,13 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
		 CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_IS_COOLING_DEV,
	.verify		= cpufreq_generic_frequency_table_verify,
	.attr		= scmi_cpufreq_hw_attr,
	.target_index	= scmi_cpufreq_set_target,
	.fast_switch	= scmi_cpufreq_fast_switch,
	.get		= scmi_cpufreq_get_rate,
	.init		= scmi_cpufreq_init,
	.exit		= scmi_cpufreq_exit,
	.register_em	= scmi_cpufreq_register_em,
	.set_boost	= cpufreq_boost_set_sw,
};

static int scmi_cpufreq_probe(struct scmi_device *sdev)

@@ -372,6 +404,8 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
	if (!handle)
		return -ENODEV;

	scmi_cpufreq_driver.driver_data = sdev;

	perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
	if (IS_ERR(perf_ops))
		return PTR_ERR(perf_ops);

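The notifier added above feeds firmware limit changes into the policy through freq_qos, so the cpufreq core aggregates them with every other constraint instead of the driver clamping frequencies itself. A sketch of the request lifecycle under that assumption:

#include <linux/cpufreq.h>
#include <linux/pm_qos.h>

/* At init: register a MAX-frequency constraint, initially unbounded. */
static int add_limit_request(struct cpufreq_policy *policy,
			     struct freq_qos_request *req)
{
	return freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
				    FREQ_QOS_MAX_DEFAULT_VALUE);
}

/* On a firmware event: tighten or relax the cap; the core then
 * re-evaluates the effective policy->max across all requests. */
static void update_limit(struct freq_qos_request *req, unsigned int khz)
{
	freq_qos_update_request(req, khz);
}
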
@@ -37,9 +37,16 @@ static struct scpi_ops *scpi_ops;

static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
	struct scpi_data *priv = policy->driver_data;
	unsigned long rate = clk_get_rate(priv->clk);
	struct cpufreq_policy *policy;
	struct scpi_data *priv;
	unsigned long rate;

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	priv = policy->driver_data;
	rate = clk_get_rate(priv->clk);

	return rate / 1000;
}

@@ -47,8 +54,9 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
static int
scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
	u64 rate = policy->freq_table[index].frequency * 1000;
	unsigned long freq_khz = policy->freq_table[index].frequency;
	struct scpi_data *priv = policy->driver_data;
	unsigned long rate = freq_khz * 1000;
	int ret;

	ret = clk_set_rate(priv->clk, rate);

@@ -56,7 +64,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
	if (ret)
		return ret;

	if (clk_get_rate(priv->clk) != rate)
	if (clk_get_rate(priv->clk) / 1000 != freq_khz)
		return -EIO;

	return 0;

@@ -72,7 +80,7 @@ scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
	if (domain < 0)
		return domain;

	for_each_possible_cpu(cpu) {
	for_each_present_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

@@ -177,7 +185,7 @@ out_free_opp:
	return ret;
}

static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
static void scpi_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct scpi_data *priv = policy->driver_data;

@@ -185,8 +193,6 @@ static void scpi_cpufreq_exit(struct cpufreq_policy *policy)
	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
	kfree(priv);

	return 0;
}

static struct cpufreq_driver scpi_cpufreq_driver = {

@@ -195,7 +201,6 @@ static struct cpufreq_driver scpi_cpufreq_driver = {
		 CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_IS_COOLING_DEV,
	.verify	= cpufreq_generic_frequency_table_verify,
	.attr	= cpufreq_generic_attr,
	.get	= scpi_cpufreq_get_rate,
	.init	= scpi_cpufreq_init,
	.exit	= scpi_cpufreq_exit,
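Two behavioral fixes sit in the scpi hunks above: scpi_cpufreq_get_rate() now bails out when cpufreq_cpu_get_raw() returns NULL (the ->get() hook can be invoked for a CPU whose policy is not, or no longer, set up), and the post-clk_set_rate() sanity check compares frequencies in kHz rather than Hz. One motivation for the latter, with illustrative numbers:

	/* Request 1200000 kHz -> 1200000000 Hz. If the clock framework
	 * settles on, say, 1200000500 Hz, a Hz-exact comparison would
	 * report -EIO even though the CPU effectively runs at the
	 * requested frequency. Comparing at kHz granularity tolerates
	 * such sub-kHz rounding.
	 */
	unsigned long freq_khz = 1200000;
	unsigned long rate = freq_khz * 1000;

	ret = clk_set_rate(priv->clk, rate);
	if (!ret && clk_get_rate(priv->clk) / 1000 != freq_khz)
		ret = -EIO;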
@@ -135,14 +135,12 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
	return 0;
}

static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static void sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);

	clk_put(cpuclk);

	return 0;
}

static void sh_cpufreq_cpu_ready(struct cpufreq_policy *policy)
@@ -165,16 +165,14 @@ static struct cpufreq_driver spear_cpufreq_driver = {
	.target_index	= spear_cpufreq_target,
	.get		= cpufreq_generic_get,
	.init		= spear_cpufreq_init,
	.attr		= cpufreq_generic_attr,
};

static int spear_cpufreq_probe(struct platform_device *pdev)
{
	struct device_node *np;
	const struct property *prop;
	struct cpufreq_frequency_table *freq_tbl;
	const __be32 *val;
	int cnt, i, ret;
	u32 val;
	int cnt, ret, i = 0;

	np = of_cpu_device_node_get(0);
	if (!np) {

@@ -186,26 +184,23 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
				 &spear_cpufreq.transition_latency))
		spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;

	prop = of_find_property(np, "cpufreq_tbl", NULL);
	if (!prop || !prop->value) {
	cnt = of_property_count_u32_elems(np, "cpufreq_tbl");
	if (cnt <= 0) {
		pr_err("Invalid cpufreq_tbl\n");
		ret = -ENODEV;
		goto out_put_node;
	}

	cnt = prop->length / sizeof(u32);
	val = prop->value;

	freq_tbl = kcalloc(cnt + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl) {
		ret = -ENOMEM;
		goto out_put_node;
	}

	for (i = 0; i < cnt; i++)
		freq_tbl[i].frequency = be32_to_cpup(val++);
	of_property_for_each_u32(np, "cpufreq_tbl", val)
		freq_tbl[i++].frequency = val;

	freq_tbl[i].frequency = CPUFREQ_TABLE_END;
	freq_tbl[cnt].frequency = CPUFREQ_TABLE_END;

	spear_cpufreq.freq_tbl = freq_tbl;
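The spear probe above switches from open-coded struct property access and be32 conversion to the of_property_count_u32_elems()/of_property_for_each_u32() helpers. A standalone sketch of the pattern, assuming a node carrying cpufreq_tbl = <250000 500000 750000>;:

	u32 val;
	int i = 0;
	int cnt = of_property_count_u32_elems(np, "cpufreq_tbl");

	if (cnt <= 0)
		return -ENODEV;	/* property missing or malformed */

	/* The iterator hands back host-endian u32 values, so no
	 * be32_to_cpup() and no manual pointer walking is needed.
	 */
	of_property_for_each_u32(np, "cpufreq_tbl", val)
		freq_tbl[i++].frequency = val;
	freq_tbl[cnt].frequency = CPUFREQ_TABLE_END;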
@@ -400,16 +400,12 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
	return 0;
}

static int centrino_cpu_exit(struct cpufreq_policy *policy)
static void centrino_cpu_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;

	if (!per_cpu(centrino_model, cpu))
		return -ENODEV;

	per_cpu(centrino_model, cpu) = NULL;

	return 0;
	if (per_cpu(centrino_model, cpu))
		per_cpu(centrino_model, cpu) = NULL;
}

/**
@@ -18,7 +18,7 @@
#include <linux/regmap.h>

#define VERSION_ELEMENTS	3
#define MAX_PCODE_NAME_LEN	7
#define MAX_PCODE_NAME_LEN	16

#define VERSION_SHIFT		28
#define HW_INFO_INDEX		1

@@ -274,7 +274,7 @@ static int __init sti_cpufreq_init(void)
		goto skip_voltage_scaling;
	}

	if (!of_get_property(ddata.cpu->of_node, "operating-points-v2", NULL)) {
	if (!of_property_present(ddata.cpu->of_node, "operating-points-v2")) {
		dev_err(ddata.cpu, "OPP-v2 not supported\n");
		goto skip_voltage_scaling;
	}

@@ -300,6 +300,7 @@ module_init(sti_cpufreq_init);
static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = {
	{ .compatible = "st,stih407" },
	{ .compatible = "st,stih410" },
	{ .compatible = "st,stih418" },
	{ },
};
MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match);
@@ -102,7 +102,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)

	snprintf(name, MAX_NAME_LEN, "speed%d", speed);

	for_each_possible_cpu(cpu) {
	for_each_present_cpu(cpu) {
		struct device *cpu_dev = get_cpu_device(cpu);

		if (!cpu_dev) {

@@ -129,7 +129,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
	pr_err("Failed to register platform device\n");

free_opp:
	for_each_possible_cpu(cpu)
	for_each_present_cpu(cpu)
		dev_pm_opp_put_prop_name(opp_tokens[cpu]);
	kfree(opp_tokens);

@@ -143,7 +143,7 @@ static int sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)

	platform_device_unregister(cpufreq_dt_pdev);

	for_each_possible_cpu(cpu)
	for_each_present_cpu(cpu)
		dev_pm_opp_put_prop_name(opp_tokens[cpu]);

	kfree(opp_tokens);
@@ -52,12 +52,15 @@ out:

static int tegra124_cpufreq_probe(struct platform_device *pdev)
{
	struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
	struct tegra124_cpufreq_priv *priv;
	struct device_node *np;
	struct device *cpu_dev;
	struct platform_device_info cpufreq_dt_devinfo = {};
	int ret;

	if (!np)
		return -ENODEV;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

@@ -66,15 +69,9 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
	if (!cpu_dev)
		return -ENODEV;

	np = of_cpu_device_node_get(0);
	if (!np)
		return -ENODEV;

	priv->cpu_clk = of_clk_get_by_name(np, "cpu_g");
	if (IS_ERR(priv->cpu_clk)) {
		ret = PTR_ERR(priv->cpu_clk);
		goto out_put_np;
	}
	if (IS_ERR(priv->cpu_clk))
		return PTR_ERR(priv->cpu_clk);

	priv->dfll_clk = of_clk_get_by_name(np, "dfll");
	if (IS_ERR(priv->dfll_clk)) {

@@ -110,8 +107,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)

	platform_set_drvdata(pdev, priv);

	of_node_put(np);

	return 0;

out_put_pllp_clk:

@@ -122,8 +117,6 @@ out_put_dfll_clk:
	clk_put(priv->dfll_clk);
out_put_cpu_clk:
	clk_put(priv->cpu_clk);
out_put_np:
	of_node_put(np);

	return ret;
}
@@ -73,11 +73,18 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
	struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
	unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
	u32 cpu;

	policy->freq_table = data->clusters[cluster].table;
	policy->cpuinfo.transition_latency = 300 * 1000;
	policy->driver_data = NULL;

	/* set same policy for all cpus in a cluster */
	for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
		if (data->cpus[cpu].bpmp_cluster_id == cluster)
			cpumask_set_cpu(cpu, policy->cpus);
	}

	return 0;
}

@@ -123,7 +130,6 @@ static struct cpufreq_driver tegra186_cpufreq_driver = {
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = tegra186_cpufreq_set_target,
	.init = tegra186_cpufreq_init,
	.attr = cpufreq_generic_attr,
};

static struct cpufreq_frequency_table *init_vhint_table(
@@ -535,14 +535,12 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
	return 0;
}

static int tegra194_cpufreq_exit(struct cpufreq_policy *policy)
static void tegra194_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);

	dev_pm_opp_remove_all_dynamic(cpu_dev);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);

	return 0;
}

static int tegra194_cpufreq_online(struct cpufreq_policy *policy)

@@ -590,7 +588,6 @@ static struct cpufreq_driver tegra194_cpufreq_driver = {
	.exit = tegra194_cpufreq_exit,
	.online = tegra194_cpufreq_online,
	.offline = tegra194_cpufreq_offline,
	.attr = cpufreq_generic_attr,
};

static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
@@ -295,7 +295,7 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data)
	return 0;
}

static const struct of_device_id ti_cpufreq_of_match[] = {
static const struct of_device_id ti_cpufreq_of_match[] __maybe_unused = {
	{ .compatible = "ti,am33xx", .data = &am3x_soc_data, },
	{ .compatible = "ti,am3517", .data = &am3517_soc_data, },
	{ .compatible = "ti,am43", .data = &am4x_soc_data, },

@@ -312,12 +312,10 @@ static const struct of_device_id ti_cpufreq_of_match[] = {

static const struct of_device_id *ti_cpufreq_match_node(void)
{
	struct device_node *np;
	struct device_node *np __free(device_node) = of_find_node_by_path("/");
	const struct of_device_id *match;

	np = of_find_node_by_path("/");
	match = of_match_node(ti_cpufreq_of_match, np);
	of_node_put(np);

	return match;
}
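The ti_cpufreq_match_node() rewrite above (and the tegra124 probe earlier) uses the scope-based cleanup helpers from <linux/cleanup.h>: the __free(device_node) annotation arranges for of_node_put() to run automatically when np goes out of scope, which is why the explicit put calls and the out_put_np unwind label disappear. A minimal sketch of the pattern:

	#include <linux/cleanup.h>
	#include <linux/of.h>

	static const struct of_device_id *example_match_node(void)
	{
		/* of_node_put(np) runs automatically on every return path. */
		struct device_node *np __free(device_node) = of_find_node_by_path("/");

		return of_match_node(ti_cpufreq_of_match, np);
	}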
@@ -451,7 +451,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
	return 0;
}

static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
static void ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev;

@@ -464,11 +464,10 @@ static void ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
		return;
	}

	put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
	return 0;
}

static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)

@@ -492,7 +491,6 @@ static struct cpufreq_driver ve_spc_cpufreq_driver = {
	.init = ve_spc_cpufreq_init,
	.exit = ve_spc_cpufreq_exit,
	.ready = ve_spc_cpufreq_ready,
	.attr = cpufreq_generic_attr,
};

#ifdef CONFIG_BL_SWITCHER
@@ -0,0 +1,333 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 Google LLC
 */

#include <linux/arch_topology.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/*
 * CPU0..CPUn
 * +-------------+-------------------------------+--------+-------+
 * | Register    | Description                   | Offset | Len   |
 * +-------------+-------------------------------+--------+-------+
 * | cur_perf    | read this register to get     | 0x0    | 0x4   |
 * |             | the current perf (integer val |        |       |
 * |             | representing perf relative to |        |       |
 * |             | max performance)              |        |       |
 * |             | that vCPU is running at       |        |       |
 * +-------------+-------------------------------+--------+-------+
 * | set_perf    | write to this register to set | 0x4    | 0x4   |
 * |             | perf value of the vCPU        |        |       |
 * +-------------+-------------------------------+--------+-------+
 * | perftbl_len | number of entries in perf     | 0x8    | 0x4   |
 * |             | table. A single entry in the  |        |       |
 * |             | perf table denotes no table   |        |       |
 * |             | and the entry contains        |        |       |
 * |             | the maximum perf value        |        |       |
 * |             | that this vCPU supports.      |        |       |
 * |             | The guest can request any     |        |       |
 * |             | value between 1 and max perf  |        |       |
 * |             | when perftbls are not used.   |        |       |
 * +---------------------------------------------+--------+-------+
 * | perftbl_sel | write to this register to     | 0xc    | 0x4   |
 * |             | select perf table entry to    |        |       |
 * |             | read from                     |        |       |
 * +---------------------------------------------+--------+-------+
 * | perftbl_rd  | read this register to get     | 0x10   | 0x4   |
 * |             | perf value of the selected    |        |       |
 * |             | entry based on perftbl_sel    |        |       |
 * +---------------------------------------------+--------+-------+
 * | perf_domain | performance domain number     | 0x14   | 0x4   |
 * |             | that this vCPU belongs to.    |        |       |
 * |             | vCPUs sharing the same perf   |        |       |
 * |             | domain number are part of the |        |       |
 * |             | same performance domain.      |        |       |
 * +-------------+-------------------------------+--------+-------+
 */

#define REG_CUR_PERF_STATE_OFFSET 0x0
#define REG_SET_PERF_STATE_OFFSET 0x4
#define REG_PERFTBL_LEN_OFFSET 0x8
#define REG_PERFTBL_SEL_OFFSET 0xc
#define REG_PERFTBL_RD_OFFSET 0x10
#define REG_PERF_DOMAIN_OFFSET 0x14
#define PER_CPU_OFFSET 0x1000

#define PERFTBL_MAX_ENTRIES 64U

static void __iomem *base;
static DEFINE_PER_CPU(u32, perftbl_num_entries);

static void virt_scale_freq_tick(void)
{
	int cpu = smp_processor_id();
	u32 max_freq = (u32)cpufreq_get_hw_max_freq(cpu);
	u64 cur_freq;
	unsigned long scale;

	cur_freq = (u64)readl_relaxed(base + cpu * PER_CPU_OFFSET
			+ REG_CUR_PERF_STATE_OFFSET);

	cur_freq <<= SCHED_CAPACITY_SHIFT;
	scale = (unsigned long)div_u64(cur_freq, max_freq);
	scale = min(scale, SCHED_CAPACITY_SCALE);

	this_cpu_write(arch_freq_scale, scale);
}

static struct scale_freq_data virt_sfd = {
	.source = SCALE_FREQ_SOURCE_VIRT,
	.set_freq_scale = virt_scale_freq_tick,
};

static unsigned int virt_cpufreq_set_perf(struct cpufreq_policy *policy,
					  unsigned int target_freq)
{
	writel_relaxed(target_freq,
		       base + policy->cpu * PER_CPU_OFFSET + REG_SET_PERF_STATE_OFFSET);
	return 0;
}

static unsigned int virt_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	virt_cpufreq_set_perf(policy, target_freq);
	return target_freq;
}

static u32 virt_cpufreq_get_perftbl_entry(int cpu, u32 idx)
{
	writel_relaxed(idx, base + cpu * PER_CPU_OFFSET +
		       REG_PERFTBL_SEL_OFFSET);
	return readl_relaxed(base + cpu * PER_CPU_OFFSET +
			     REG_PERFTBL_RD_OFFSET);
}

static int virt_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq,
			       unsigned int relation)
{
	struct cpufreq_freqs freqs;
	int ret = 0;

	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = virt_cpufreq_set_perf(policy, target_freq);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	return ret;
}

static int virt_cpufreq_get_sharing_cpus(struct cpufreq_policy *policy)
{
	u32 cur_perf_domain, perf_domain;
	struct device *cpu_dev;
	int cpu;

	cur_perf_domain = readl_relaxed(base + policy->cpu *
					PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET);

	for_each_present_cpu(cpu) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev)
			continue;

		perf_domain = readl_relaxed(base + cpu *
					    PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET);

		if (perf_domain == cur_perf_domain)
			cpumask_set_cpu(cpu, policy->cpus);
	}

	return 0;
}

static int virt_cpufreq_get_freq_info(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *table;
	u32 num_perftbl_entries, idx;

	num_perftbl_entries = per_cpu(perftbl_num_entries, policy->cpu);

	if (num_perftbl_entries == 1) {
		policy->cpuinfo.min_freq = 1;
		policy->cpuinfo.max_freq = virt_cpufreq_get_perftbl_entry(policy->cpu, 0);

		policy->min = policy->cpuinfo.min_freq;
		policy->max = policy->cpuinfo.max_freq;

		policy->cur = policy->max;
		return 0;
	}

	table = kcalloc(num_perftbl_entries + 1, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	for (idx = 0; idx < num_perftbl_entries; idx++)
		table[idx].frequency = virt_cpufreq_get_perftbl_entry(policy->cpu, idx);

	table[idx].frequency = CPUFREQ_TABLE_END;
	policy->freq_table = table;

	return 0;
}

static int virt_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev)
		return -ENODEV;

	ret = virt_cpufreq_get_freq_info(policy);
	if (ret) {
		dev_warn(cpu_dev, "failed to get cpufreq info\n");
		return ret;
	}

	ret = virt_cpufreq_get_sharing_cpus(policy);
	if (ret) {
		dev_warn(cpu_dev, "failed to get sharing cpumask\n");
		return ret;
	}

	/*
	 * To simplify and improve latency of handling frequency requests on
	 * the host side, this ensures that the vCPU thread triggering the MMIO
	 * abort is the same thread whose performance constraints (Ex. uclamp
	 * settings) need to be updated. This simplifies the VMM (Virtual
	 * Machine Manager) having to find the correct vCPU thread and/or
	 * facing permission issues when configuring other threads.
	 */
	policy->dvfs_possible_from_any_cpu = false;
	policy->fast_switch_possible = true;

	/*
	 * Using the default SCALE_FREQ_SOURCE_CPUFREQ is insufficient since
	 * the actual physical CPU frequency may not match requested frequency
	 * from the vCPU thread due to frequency update latencies or other
	 * inputs to the physical CPU frequency selection. This additional FIE
	 * source allows for more accurate freq_scale updates and only takes
	 * effect if another FIE source such as AMUs have not been registered.
	 */
	topology_set_scale_freq_source(&virt_sfd, policy->cpus);

	return 0;
}

static void virt_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_VIRT, policy->related_cpus);
	kfree(policy->freq_table);
}

static int virt_cpufreq_online(struct cpufreq_policy *policy)
{
	/* Nothing to restore. */
	return 0;
}

static int virt_cpufreq_offline(struct cpufreq_policy *policy)
{
	/* Dummy offline() to avoid exit() being called and freeing resources. */
	return 0;
}

static int virt_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
	if (policy->freq_table)
		return cpufreq_frequency_table_verify(policy, policy->freq_table);

	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}

static struct cpufreq_driver cpufreq_virt_driver = {
	.name		= "virt-cpufreq",
	.init		= virt_cpufreq_cpu_init,
	.exit		= virt_cpufreq_cpu_exit,
	.online		= virt_cpufreq_online,
	.offline	= virt_cpufreq_offline,
	.verify		= virt_cpufreq_verify_policy,
	.target		= virt_cpufreq_target,
	.fast_switch	= virt_cpufreq_fast_switch,
	.attr		= cpufreq_generic_attr,
};

static int virt_cpufreq_driver_probe(struct platform_device *pdev)
{
	u32 num_perftbl_entries;
	int ret, cpu;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	for_each_possible_cpu(cpu) {
		num_perftbl_entries = readl_relaxed(base + cpu * PER_CPU_OFFSET +
						    REG_PERFTBL_LEN_OFFSET);

		if (!num_perftbl_entries || num_perftbl_entries > PERFTBL_MAX_ENTRIES)
			return -ENODEV;

		per_cpu(perftbl_num_entries, cpu) = num_perftbl_entries;
	}

	ret = cpufreq_register_driver(&cpufreq_virt_driver);
	if (ret) {
		dev_err(&pdev->dev, "Virtual CPUFreq driver failed to register: %d\n", ret);
		return ret;
	}

	dev_dbg(&pdev->dev, "Virtual CPUFreq driver initialized\n");
	return 0;
}

static void virt_cpufreq_driver_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&cpufreq_virt_driver);
}

static const struct of_device_id virt_cpufreq_match[] = {
	{ .compatible = "qemu,virtual-cpufreq", .data = NULL},
	{}
};
MODULE_DEVICE_TABLE(of, virt_cpufreq_match);

static struct platform_driver virt_cpufreq_driver = {
	.probe = virt_cpufreq_driver_probe,
	.remove_new = virt_cpufreq_driver_remove,
	.driver = {
		.name = "virt-cpufreq",
		.of_match_table = virt_cpufreq_match,
	},
};

static int __init virt_cpufreq_init(void)
{
	return platform_driver_register(&virt_cpufreq_driver);
}
postcore_initcall(virt_cpufreq_init);

static void __exit virt_cpufreq_exit(void)
{
	platform_driver_unregister(&virt_cpufreq_driver);
}
module_exit(virt_cpufreq_exit);

MODULE_DESCRIPTION("Virtual cpufreq driver");
MODULE_LICENSE("GPL");
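In the new virtual driver above, virt_scale_freq_tick() feeds the scheduler's frequency-invariance engine: the delivered/maximum frequency ratio is expressed in fixed point with SCHED_CAPACITY_SHIFT (10) fractional bits and clamped to SCHED_CAPACITY_SCALE (1024). A worked example of that math with illustrative numbers (not values from the driver):

	/* cur_perf read back as 1500 (e.g. MHz), hw max 2000:
	 * scale = (1500 << 10) / 2000 = 768, i.e. the vCPU is treated
	 * as running at 768/1024 = 75% of its maximum capacity.
	 */
	u64 cur_freq = 1500;
	u32 max_freq = 2000;
	unsigned long scale = min((unsigned long)div_u64(cur_freq << SCHED_CAPACITY_SHIFT,
							 max_freq),
				  SCHED_CAPACITY_SCALE);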
@@ -301,6 +301,7 @@ static const struct soc_id soc_id[] = {
	{ qcom_board_id(QRU1032) },
	{ qcom_board_id(QRU1052) },
	{ qcom_board_id(QRU1062) },
	{ qcom_board_id(IPQ5321) },
};

static const char *socinfo_machine(struct device *dev, unsigned int id)
@@ -270,6 +270,7 @@
#define QCOM_ID_QCS8550		603
#define QCOM_ID_QCM8550		604
#define QCOM_ID_IPQ5300		624
#define QCOM_ID_IPQ5321		650

/*
 * The board type and revision information, used by Qualcomm bootloaders and
@@ -45,6 +45,7 @@ enum scale_freq_source {
	SCALE_FREQ_SOURCE_CPUFREQ = 0,
	SCALE_FREQ_SOURCE_ARCH,
	SCALE_FREQ_SOURCE_CPPC,
	SCALE_FREQ_SOURCE_VIRT,
};

struct scale_freq_data {
@@ -143,6 +143,9 @@ struct cpufreq_policy {
	/* Per policy boost enabled flag. */
	bool			boost_enabled;

	/* Per policy boost supported flag. */
	bool			boost_supported;

	/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
	unsigned int		cached_target_freq;
	unsigned int		cached_resolved_idx;

@@ -398,7 +401,7 @@ struct cpufreq_driver {

	int		(*online)(struct cpufreq_policy *policy);
	int		(*offline)(struct cpufreq_policy *policy);
	int		(*exit)(struct cpufreq_policy *policy);
	void		(*exit)(struct cpufreq_policy *policy);
	int		(*suspend)(struct cpufreq_policy *policy);
	int		(*resume)(struct cpufreq_policy *policy);

@@ -809,8 +812,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);

int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation);
				 unsigned int target_freq, unsigned int min,
				 unsigned int max, unsigned int relation);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
		unsigned int freq);

@@ -818,9 +821,8 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);

#ifdef CONFIG_CPU_FREQ
int cpufreq_boost_trigger_state(int state);
int cpufreq_boost_enabled(void);
int cpufreq_enable_boost_support(void);
bool policy_has_boost_freq(struct cpufreq_policy *policy);
bool cpufreq_boost_enabled(void);
int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state);

/* Find lowest freq at or above target in a table in ascending order */
static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,

@@ -875,12 +877,12 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
	return best;
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
					     unsigned int target_freq,
					     bool efficiencies)
static inline int find_index_l(struct cpufreq_policy *policy,
			       unsigned int target_freq,
			       unsigned int min, unsigned int max,
			       bool efficiencies)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	target_freq = clamp_val(target_freq, min, max);

	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		return cpufreq_table_find_index_al(policy, target_freq,

@@ -890,6 +892,14 @@ static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
						   efficiencies);
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
					     unsigned int target_freq,
					     bool efficiencies)
{
	return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies);
}

/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
					      unsigned int target_freq,

@@ -943,12 +953,12 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
	return best;
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
					     unsigned int target_freq,
					     bool efficiencies)
static inline int find_index_h(struct cpufreq_policy *policy,
			       unsigned int target_freq,
			       unsigned int min, unsigned int max,
			       bool efficiencies)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	target_freq = clamp_val(target_freq, min, max);

	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		return cpufreq_table_find_index_ah(policy, target_freq,

@@ -958,6 +968,14 @@ static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
						   efficiencies);
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
					     unsigned int target_freq,
					     bool efficiencies)
{
	return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies);
}

/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
					      unsigned int target_freq,

@@ -1028,12 +1046,12 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
	return best;
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
					     unsigned int target_freq,
					     bool efficiencies)
static inline int find_index_c(struct cpufreq_policy *policy,
			       unsigned int target_freq,
			       unsigned int min, unsigned int max,
			       bool efficiencies)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	target_freq = clamp_val(target_freq, min, max);

	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		return cpufreq_table_find_index_ac(policy, target_freq,

@@ -1043,8 +1061,32 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
						   efficiencies);
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
					     unsigned int target_freq,
					     bool efficiencies)
{
	return find_index_c(policy, target_freq, policy->min, policy->max, efficiencies);
}

static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy,
					unsigned int min, unsigned int max,
					int idx)
{
	unsigned int freq;

	if (idx < 0)
		return false;

	freq = policy->freq_table[idx].frequency;

	return freq == clamp_val(freq, min, max);
}

static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
						 unsigned int target_freq,
						 unsigned int min,
						 unsigned int max,
						 unsigned int relation)
{
	bool efficiencies = policy->efficiencies_available &&

@@ -1055,28 +1097,26 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
	relation &= ~CPUFREQ_RELATION_E;

	if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
		return cpufreq_table_index_unsorted(policy, target_freq,
						    relation);
		return cpufreq_table_index_unsorted(policy, target_freq, min,
						    max, relation);
retry:
	switch (relation) {
	case CPUFREQ_RELATION_L:
		idx = cpufreq_table_find_index_l(policy, target_freq,
						 efficiencies);
		idx = find_index_l(policy, target_freq, min, max, efficiencies);
		break;
	case CPUFREQ_RELATION_H:
		idx = cpufreq_table_find_index_h(policy, target_freq,
						 efficiencies);
		idx = find_index_h(policy, target_freq, min, max, efficiencies);
		break;
	case CPUFREQ_RELATION_C:
		idx = cpufreq_table_find_index_c(policy, target_freq,
						 efficiencies);
		idx = find_index_c(policy, target_freq, min, max, efficiencies);
		break;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}

	if (idx < 0 && efficiencies) {
	/* Limit frequency index to honor min and max */
	if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) {
		efficiencies = false;
		goto retry;
	}

@@ -1185,21 +1225,16 @@ static inline int cpufreq_boost_trigger_state(int state)
{
	return 0;
}
static inline int cpufreq_boost_enabled(void)
{
	return 0;
}

static inline int cpufreq_enable_boost_support(void)
{
	return -EINVAL;
}

static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
static inline bool cpufreq_boost_enabled(void)
{
	return false;
}

static inline int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
	return -EOPNOTSUPP;
}

static inline int
cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
			      unsigned int frequency)
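The cpufreq.h refactor above threads explicit min/max limits through the table lookups (find_index_l/h/c and cpufreq_is_in_limits()), so cpufreq_frequency_table_target() no longer has to trust that policy->min/max are current; the old cpufreq_table_find_index_*() wrappers keep their behavior by passing policy->min/max through. A walk-through of the clamped CPUFREQ_RELATION_H path with illustrative values:

	/* Table: {500000, 1000000, 1500000, 2000000} kHz (ascending),
	 * min = 800000, max = 1800000, target = 2000000, relation H.
	 * 1. find_index_h() clamps the target to [min, max] -> 1800000.
	 * 2. The highest entry at or below 1800000 is index 2 (1500000).
	 * 3. cpufreq_is_in_limits() confirms 1500000 lies within
	 *    [800000, 1800000]; if it did not (or no index was found),
	 *    the lookup retries with efficiency filtering disabled.
	 */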
@@ -0,0 +1 @@
CONFIG_CPUFREQ_VIRT=m