diff -up linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/cpufeature.h.morefixes linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/cpufeature.h
--- linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/cpufeature.h.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/cpufeature.h	2016-11-05 08:30:46.276033474 -0400
@@ -354,6 +354,7 @@ extern const char * const x86_power_flag
 #define cpu_has_avx		boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_avx2		boot_cpu_has(X86_FEATURE_AVX2)
 #define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
+#define cpu_has_mp		boot_cpu_has(X86_FEATURE_MP)
 #define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
 #define cpu_has_xstore		boot_cpu_has(X86_FEATURE_XSTORE)
 #define cpu_has_xstore_enabled	boot_cpu_has(X86_FEATURE_XSTORE_EN)
diff -up linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/irq_remapping.h.morefixes linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/irq_remapping.h
--- linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/irq_remapping.h.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/irq_remapping.h	2016-11-05 09:39:38.425043476 -0400
@@ -110,7 +110,7 @@ static inline bool setup_remapped_irq(in
 	return false;
 }
 
-int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
+static inline int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
 {
 	return -ENOSYS;
 }
diff -up linux-3.10.0-514.sdl7.i686/arch/x86/kernel/irq_32.c.morefixes linux-3.10.0-514.sdl7.i686/arch/x86/kernel/irq_32.c
--- linux-3.10.0-514.sdl7.i686/arch/x86/kernel/irq_32.c.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/arch/x86/kernel/irq_32.c	2016-11-05 09:49:59.371055125 -0400
@@ -24,9 +24,12 @@
 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 
+DEFINE_PER_CPU_SHARED_ALIGNED(rh_irq_cpustat_t, rh_irq_stat);
+
 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
 EXPORT_PER_CPU_SYMBOL(irq_regs);
 
+
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 
 int sysctl_panic_on_stackoverflow __read_mostly;
diff -up linux-3.10.0-514.sdl7.i686/include/linux/pps_kernel.h.morefixes linux-3.10.0-514.sdl7.i686/include/linux/pps_kernel.h
--- linux-3.10.0-514.sdl7.i686/include/linux/pps_kernel.h.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/include/linux/pps_kernel.h	2016-11-05 10:44:41.492049162 -0400
@@ -115,10 +115,17 @@ static inline void pps_get_ts(struct pps
 {
 	struct system_time_snapshot snap;
 	ktime_get_snapshot(&snap);
+#if defined CONFIG_X86_64
 	ts->ts_real = ktime_to_timespec64(snap.real);
 #ifdef CONFIG_NTP_PPS
 	ts->ts_raw = ktime_to_timespec64(snap.raw);
 #endif
+#else
+	ts->ts_real = ktime_to_timespec(snap.real);
+#ifdef CONFIG_NTP_PPS
+	ts->ts_raw = ktime_to_timespec(snap.raw);
+#endif
+#endif
 }
 
 /* Subtract known time delay from PPS event time(s) */
diff -up linux-3.10.0-514.sdl7.i686/kernel/hrtimer.c.morefixes linux-3.10.0-514.sdl7.i686/kernel/hrtimer.c
--- linux-3.10.0-514.sdl7.i686/kernel/hrtimer.c.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/kernel/hrtimer.c	2016-11-05 10:58:56.726065206 -0400
@@ -328,6 +328,7 @@ u64 ktime_divns(const ktime_t kt, s64 di
 
 	return dclc;
 }
+EXPORT_SYMBOL_GPL(ktime_divns);
 #endif /* BITS_PER_LONG >= 64 */
 
 /*
diff -up linux-3.10.0-514.sdl7.i686/mm/swap.c.morefixes linux-3.10.0-514.sdl7.i686/mm/swap.c
--- linux-3.10.0-514.sdl7.i686/mm/swap.c.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/mm/swap.c	2016-11-05 08:55:41.521061525 -0400
@@ -972,9 +972,6 @@ void release_pages(struct page **pages,
 		if (!put_page_testzero(page))
 			continue;
 
-		VM_BUG_ON_PAGE(check_mmu_gather &&
-			       trans_huge_mmu_gather_count(page), page);
-
 		if (PageLRU(page)) {
 			if (!was_thp)
 				zone = zone_lru_lock(zone, page, &lock_batch,