Skip to content

Commit a057484

Browse files
author
Alex Shi
committed
Merge remote-tracking branch 'lts/linux-4.4.y' into linux-linaro-lsk-v4.4
Conflicts: also changed cpu_enable_uao in arch/arm64/include/asm/processor.h; fixed a mismatched comment in arch/arm64/kernel/suspend.c
2 parents 3ba1692 + c95b7f1 commit a057484

52 files changed

Lines changed: 344 additions & 120 deletions

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
VERSION = 4
22
PATCHLEVEL = 4
3-
SUBLEVEL = 36
3+
SUBLEVEL = 38
44
EXTRAVERSION =
55
NAME = Blurry Fish Butt
66

arch/arc/include/asm/delay.h

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -22,10 +22,11 @@
2222
static inline void __delay(unsigned long loops)
2323
{
2424
__asm__ __volatile__(
25-
" lp 1f \n"
26-
" nop \n"
27-
"1: \n"
28-
: "+l"(loops));
25+
" mov lp_count, %0 \n"
26+
" lp 1f \n"
27+
" nop \n"
28+
"1: \n"
29+
: : "r"(loops));
2930
}
3031

3132
extern void __bad_udelay(void);

arch/arm64/include/asm/cpufeature.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ struct arm64_cpu_capabilities {
8181
const char *desc;
8282
u16 capability;
8383
bool (*matches)(const struct arm64_cpu_capabilities *);
84-
void (*enable)(void *); /* Called on all active CPUs */
84+
int (*enable)(void *); /* Called on all active CPUs */
8585
union {
8686
struct { /* To be used for erratum handling only */
8787
u32 midr_model;

arch/arm64/include/asm/processor.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -190,7 +190,7 @@ static inline void spin_lock_prefetch(const void *ptr)
190190

191191
#endif
192192

193-
void cpu_enable_pan(void *__unused);
194-
void cpu_enable_uao(void *__unused);
193+
int cpu_enable_pan(void *__unused);
194+
int cpu_enable_uao(void *__unused);
195195

196196
#endif /* __ASM_PROCESSOR_H */

arch/arm64/kernel/cpufeature.c

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,9 @@
1919
#define pr_fmt(fmt) "CPU features: " fmt
2020

2121
#include <linux/bsearch.h>
22+
#include <linux/cpumask.h>
2223
#include <linux/sort.h>
24+
#include <linux/stop_machine.h>
2325
#include <linux/types.h>
2426
#include <asm/cpu.h>
2527
#include <asm/cpufeature.h>
@@ -823,7 +825,13 @@ enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
823825

824826
for (i = 0; caps[i].matches; i++)
825827
if (caps[i].enable && cpus_have_cap(caps[i].capability))
826-
on_each_cpu(caps[i].enable, NULL, true);
828+
/*
829+
* Use stop_machine() as it schedules the work allowing
830+
* us to modify PSTATE, instead of on_each_cpu() which
831+
* uses an IPI, giving us a PSTATE that disappears when
832+
* we return.
833+
*/
834+
stop_machine(caps[i].enable, NULL, cpu_online_mask);
827835
}
828836

829837
#ifdef CONFIG_HOTPLUG_CPU

arch/arm64/kernel/suspend.c

Lines changed: 12 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,9 @@
11
#include <linux/ftrace.h>
22
#include <linux/percpu.h>
33
#include <linux/slab.h>
4+
#include <asm/alternative.h>
45
#include <asm/cacheflush.h>
6+
#include <asm/cpufeature.h>
57
#include <asm/debug-monitors.h>
68
#include <asm/pgtable.h>
79
#include <asm/memory.h>
@@ -88,11 +90,16 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
8890
ret = fn(arg);
8991

9092
/*
91-
* Never gets here, unless the suspend finisher fails.
92-
* Successful cpu_suspend() should return from cpu_resume(),
93-
* returning through this code path is considered an error
94-
* If the return value is set to 0 force ret = -EOPNOTSUPP
95-
* to make sure a proper error condition is propagated
93+
* PSTATE was not saved over suspend/resume, re-enable any
94+
* detected features that might not have been set correctly.
95+
*/
96+
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
97+
CONFIG_ARM64_PAN));
98+
99+
/*
100+
* Restore HW breakpoint registers to sane values
101+
* before debug exceptions are possibly reenabled
102+
* through local_dbg_restore.
96103
*/
97104
if (!ret)
98105
ret = -EOPNOTSUPP;

arch/arm64/mm/fault.c

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,9 @@
2929
#include <linux/sched.h>
3030
#include <linux/highmem.h>
3131
#include <linux/perf_event.h>
32+
#include <linux/preempt.h>
3233

34+
#include <asm/bug.h>
3335
#include <asm/cpufeature.h>
3436
#include <asm/exception.h>
3537
#include <asm/debug-monitors.h>
@@ -641,9 +643,17 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
641643
NOKPROBE_SYMBOL(do_debug_exception);
642644

643645
#ifdef CONFIG_ARM64_PAN
644-
void cpu_enable_pan(void *__unused)
646+
int cpu_enable_pan(void *__unused)
645647
{
648+
/*
649+
* We modify PSTATE. This won't work from irq context as the PSTATE
650+
* is discarded once we return from the exception.
651+
*/
652+
WARN_ON_ONCE(in_interrupt());
653+
646654
config_sctlr_el1(SCTLR_EL1_SPAN, 0);
655+
asm(SET_PSTATE_PAN(1));
656+
return 0;
647657
}
648658
#endif /* CONFIG_ARM64_PAN */
649659

@@ -654,7 +664,7 @@ void cpu_enable_pan(void *__unused)
654664
* We need to enable the feature at runtime (instead of adding it to
655665
* PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
656666
*/
657-
void cpu_enable_uao(void *__unused)
667+
int cpu_enable_uao(void *__unused)
658668
{
659669
asm(SET_PSTATE_UAO(1));
660670
}

arch/sparc/kernel/signal_32.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
8989
sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
9090

9191
/* 1. Make sure we are not getting garbage from the user */
92-
if (!invalid_frame_pointer(sf, sizeof(*sf)))
92+
if (invalid_frame_pointer(sf, sizeof(*sf)))
9393
goto segv_and_exit;
9494

9595
if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
@@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
150150

151151
synchronize_user_stack();
152152
sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
153-
if (!invalid_frame_pointer(sf, sizeof(*sf)))
153+
if (invalid_frame_pointer(sf, sizeof(*sf)))
154154
goto segv;
155155

156156
if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))

arch/sparc/mm/init_64.c

Lines changed: 64 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -800,8 +800,10 @@ struct mdesc_mblock {
800800
};
801801
static struct mdesc_mblock *mblocks;
802802
static int num_mblocks;
803+
static int find_numa_node_for_addr(unsigned long pa,
804+
struct node_mem_mask *pnode_mask);
803805

804-
static unsigned long ra_to_pa(unsigned long addr)
806+
static unsigned long __init ra_to_pa(unsigned long addr)
805807
{
806808
int i;
807809

@@ -817,8 +819,11 @@ static unsigned long ra_to_pa(unsigned long addr)
817819
return addr;
818820
}
819821

820-
static int find_node(unsigned long addr)
822+
static int __init find_node(unsigned long addr)
821823
{
824+
static bool search_mdesc = true;
825+
static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
826+
static int last_index;
822827
int i;
823828

824829
addr = ra_to_pa(addr);
@@ -828,13 +833,30 @@ static int find_node(unsigned long addr)
828833
if ((addr & p->mask) == p->val)
829834
return i;
830835
}
831-
/* The following condition has been observed on LDOM guests.*/
832-
WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
833-
" rule. Some physical memory will be owned by node 0.");
834-
return 0;
836+
/* The following condition has been observed on LDOM guests because
837+
* node_masks only contains the best latency mask and value.
838+
* LDOM guest's mdesc can contain a single latency group to
839+
* cover multiple address range. Print warning message only if the
840+
* address cannot be found in node_masks nor mdesc.
841+
*/
842+
if ((search_mdesc) &&
843+
((addr & last_mem_mask.mask) != last_mem_mask.val)) {
844+
/* find the available node in the mdesc */
845+
last_index = find_numa_node_for_addr(addr, &last_mem_mask);
846+
numadbg("find_node: latency group for address 0x%lx is %d\n",
847+
addr, last_index);
848+
if ((last_index < 0) || (last_index >= num_node_masks)) {
849+
/* WARN_ONCE() and use default group 0 */
850+
WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
851+
search_mdesc = false;
852+
last_index = 0;
853+
}
854+
}
855+
856+
return last_index;
835857
}
836858

837-
static u64 memblock_nid_range(u64 start, u64 end, int *nid)
859+
static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
838860
{
839861
*nid = find_node(start);
840862
start += PAGE_SIZE;
@@ -1158,6 +1180,41 @@ int __node_distance(int from, int to)
11581180
return numa_latency[from][to];
11591181
}
11601182

1183+
static int find_numa_node_for_addr(unsigned long pa,
1184+
struct node_mem_mask *pnode_mask)
1185+
{
1186+
struct mdesc_handle *md = mdesc_grab();
1187+
u64 node, arc;
1188+
int i = 0;
1189+
1190+
node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1191+
if (node == MDESC_NODE_NULL)
1192+
goto out;
1193+
1194+
mdesc_for_each_node_by_name(md, node, "group") {
1195+
mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
1196+
u64 target = mdesc_arc_target(md, arc);
1197+
struct mdesc_mlgroup *m = find_mlgroup(target);
1198+
1199+
if (!m)
1200+
continue;
1201+
if ((pa & m->mask) == m->match) {
1202+
if (pnode_mask) {
1203+
pnode_mask->mask = m->mask;
1204+
pnode_mask->val = m->match;
1205+
}
1206+
mdesc_release(md);
1207+
return i;
1208+
}
1209+
}
1210+
i++;
1211+
}
1212+
1213+
out:
1214+
mdesc_release(md);
1215+
return -1;
1216+
}
1217+
11611218
static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
11621219
{
11631220
int i;

arch/x86/kernel/head_32.S

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -571,7 +571,7 @@ early_idt_handler_common:
571571
movl %eax,%ds
572572
movl %eax,%es
573573

574-
cmpl $(__KERNEL_CS),32(%esp)
574+
cmpw $(__KERNEL_CS),32(%esp)
575575
jne 10f
576576

577577
leal 28(%esp),%eax # Pointer to %eip

0 commit comments

Comments
 (0)