Skip to content

Commit e14a3c7

Browse files
wzyy2 and yihsin-hung
authored and committed
REBASE: MALI: midgard: limit CPU's maximum frequency according to GPU's frequency
Change-Id: Id6ed4c7d108323e3825e5c49519f94dc6a9b94d8
Signed-off-by: Finley Xiao <finley.xiao@rock-chips.com>
Signed-off-by: Jacob Chen <jacob2.chen@rock-chips.com>
1 parent 6eeec66 commit e14a3c7

2 files changed

Lines changed: 138 additions & 0 deletions

File tree

drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c

Lines changed: 134 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
#endif
2727

2828
#include <linux/clk.h>
29+
#include <linux/cpufreq.h>
2930
#include <linux/devfreq.h>
3031
#ifdef CONFIG_DEVFREQ_THERMAL
3132
#include <linux/devfreq_cooling.h>
@@ -46,6 +47,11 @@
4647
#define dev_pm_opp_find_freq_ceil opp_find_freq_ceil
4748
#endif /* Linux >= 3.13 */
4849

50+
/* Maximum number of CPU clusters this limiter tracks. */
#define MAX_CLUSTERS 2

/* CPUs belonging to each cluster; filled in by the cpufreq policy
 * notifier on CPUFREQ_START from policy->related_cpus. */
static struct cpumask allowed_cpus[MAX_CLUSTERS];
/* Last observed policy->max per cluster (kHz); UINT_MAX until seen. */
static unsigned int cpu_max_freq[MAX_CLUSTERS] = {UINT_MAX, UINT_MAX};
/* Per-cluster clamp applied while the GPU runs fast; UINT_MAX = no clamp. */
static unsigned int cpu_clipped_freq[MAX_CLUSTERS] = {UINT_MAX, UINT_MAX};
4955

5056
static int
5157
kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
@@ -215,6 +221,63 @@ static void kbase_devfreq_exit(struct device *dev)
215221
kbase_devfreq_term_freq_table(kbdev);
216222
}
217223

224+
static int kbase_devfreq_trans_notifier(struct notifier_block *nb,
225+
unsigned long val, void *data)
226+
{
227+
struct kbase_device *kbdev = container_of(nb, struct kbase_device,
228+
gpu_trans_nb);
229+
struct devfreq_freqs *freqs = data;
230+
unsigned int new_rate = (unsigned int)(freqs->new / 1000);
231+
int i, cpu;
232+
233+
if (!kbdev)
234+
goto out;
235+
236+
dev_dbg(kbdev->dev, "%lu-->%lu cpu limit=%u, gpu limit=%u\n",
237+
freqs->old, freqs->new,
238+
kbdev->cpu_limit_freq,
239+
kbdev->gpu_limit_freq);
240+
241+
if (val == DEVFREQ_PRECHANGE &&
242+
new_rate >= kbdev->gpu_limit_freq) {
243+
for (i = 0; i < MAX_CLUSTERS; i++) {
244+
if (cpu_max_freq[i] > kbdev->cpu_limit_freq) {
245+
/* change policy->max right now */
246+
cpu_clipped_freq[i] = kbdev->cpu_limit_freq;
247+
if (cpumask_empty(&allowed_cpus[i]))
248+
goto out;
249+
cpu = cpumask_any_and(&allowed_cpus[i],
250+
cpu_online_mask);
251+
if (cpu >= nr_cpu_ids)
252+
goto out;
253+
cpufreq_update_policy(cpu);
254+
} else {
255+
/* avoid someone changing policy->max */
256+
cpu_clipped_freq[i] = kbdev->cpu_limit_freq;
257+
}
258+
}
259+
} else if (val == DEVFREQ_POSTCHANGE &&
260+
new_rate < kbdev->gpu_limit_freq) {
261+
for (i = 0; i < MAX_CLUSTERS; i++) {
262+
if (cpu_clipped_freq[i] != UINT_MAX) {
263+
/* recover policy->max right now */
264+
cpu_clipped_freq[i] = UINT_MAX;
265+
if (cpumask_empty(&allowed_cpus[i]))
266+
goto out;
267+
cpu = cpumask_any_and(&allowed_cpus[i],
268+
cpu_online_mask);
269+
if (cpu >= nr_cpu_ids)
270+
goto out;
271+
cpufreq_update_policy(cpu);
272+
}
273+
}
274+
}
275+
276+
out:
277+
278+
return NOTIFY_OK;
279+
}
280+
218281
int kbase_devfreq_init(struct kbase_device *kbdev)
219282
{
220283
struct devfreq_dev_profile *dp;
@@ -257,6 +320,23 @@ int kbase_devfreq_init(struct kbase_device *kbdev)
257320
goto opp_notifier_failed;
258321
}
259322

323+
if (of_property_read_u32(kbdev->dev->of_node, "cpu-limit-freq",
324+
&kbdev->cpu_limit_freq)) {
325+
dev_err(kbdev->dev, "Failed to get prop cpu-limit-freq\n");
326+
kbdev->cpu_limit_freq = UINT_MAX;
327+
}
328+
if (of_property_read_u32(kbdev->dev->of_node, "gpu-limit-freq",
329+
&kbdev->gpu_limit_freq)) {
330+
dev_err(kbdev->dev, "Failed to get prop gpu-limit-freq\n");
331+
kbdev->gpu_limit_freq = UINT_MAX;
332+
}
333+
334+
kbdev->gpu_trans_nb.notifier_call = kbase_devfreq_trans_notifier;
335+
err = devfreq_register_notifier(kbdev->devfreq, &kbdev->gpu_trans_nb,
336+
DEVFREQ_TRANSITION_NOTIFIER);
337+
if (err)
338+
dev_err(kbdev->dev, "register gpu trans notifier (%d)\n", err);
339+
260340
#ifdef CONFIG_DEVFREQ_THERMAL
261341
err = kbase_power_model_simple_init(kbdev);
262342
if (err && err != -ENODEV && err != -EPROBE_DEFER) {
@@ -319,3 +399,57 @@ void kbase_devfreq_term(struct kbase_device *kbdev)
319399
else
320400
kbdev->devfreq = NULL;
321401
}
402+
403+
static int kbase_cpufreq_policy_notifier(struct notifier_block *nb,
404+
unsigned long val, void *data)
405+
{
406+
struct cpufreq_policy *policy = data;
407+
int i;
408+
409+
if (val == CPUFREQ_START) {
410+
for (i = 0; i < MAX_CLUSTERS; i++) {
411+
if (cpumask_test_cpu(policy->cpu,
412+
&allowed_cpus[i]))
413+
break;
414+
if (cpumask_empty(&allowed_cpus[i])) {
415+
cpumask_copy(&allowed_cpus[i],
416+
policy->related_cpus);
417+
break;
418+
}
419+
}
420+
goto out;
421+
}
422+
423+
if (val != CPUFREQ_ADJUST)
424+
goto out;
425+
426+
for (i = 0; i < MAX_CLUSTERS; i++) {
427+
if (cpumask_test_cpu(policy->cpu, &allowed_cpus[i]))
428+
break;
429+
}
430+
if (i == MAX_CLUSTERS)
431+
goto out;
432+
433+
if (policy->max > cpu_clipped_freq[i])
434+
cpufreq_verify_within_limits(policy, 0, cpu_clipped_freq[i]);
435+
436+
cpu_max_freq[i] = policy->max;
437+
pr_debug("cluster%d max=%u, gpu limit=%u\n", i, cpu_max_freq[i],
438+
cpu_clipped_freq[i]);
439+
440+
out:
441+
442+
return NOTIFY_OK;
443+
}
444+
445+
/* Hooks kbase_cpufreq_policy_notifier into the cpufreq policy chain. */
static struct notifier_block notifier_policy_block = {
	.notifier_call = kbase_cpufreq_policy_notifier
};
448+
449+
/*
 * Register the cpufreq policy notifier at subsys_initcall time so the
 * per-cluster CPU masks are captured as cpufreq policies come online.
 * Returns 0 on success or a negative errno from notifier registration.
 */
static int __init kbase_cpufreq_init(void)
{
	return cpufreq_register_notifier(&notifier_policy_block,
					 CPUFREQ_POLICY_NOTIFIER);
}

subsys_initcall(kbase_cpufreq_init);

drivers/gpu/arm/midgard/mali_kbase_defs.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1180,6 +1180,10 @@ struct kbase_device {
11801180
/* Boolean indicating if an IRQ flush during reset is in progress. */
11811181
bool irq_reset_flush;
11821182

1183+
struct notifier_block gpu_trans_nb;
1184+
unsigned int gpu_limit_freq;
1185+
unsigned int cpu_limit_freq;
1186+
11831187
/* list of inited sub systems. Used during terminate/error recovery */
11841188
u32 inited_subsys;
11851189
};

0 commit comments

Comments
 (0)