diff -up linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/cpufeature.h.morefixes linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/cpufeature.h
--- linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/cpufeature.h.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/cpufeature.h	2016-11-05 08:30:46.276033474 -0400
@@ -354,6 +354,7 @@ extern const char * const x86_power_flag
 #define cpu_has_avx		boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_avx2		boot_cpu_has(X86_FEATURE_AVX2)
 #define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
+#define cpu_has_mp		boot_cpu_has(X86_FEATURE_MP)
 #define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
 #define cpu_has_xstore		boot_cpu_has(X86_FEATURE_XSTORE)
 #define cpu_has_xstore_enabled	boot_cpu_has(X86_FEATURE_XSTORE_EN)
diff -up linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/irq_remapping.h.morefixes linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/irq_remapping.h
--- linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/irq_remapping.h.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/arch/x86/include/asm/irq_remapping.h	2016-11-05 09:39:38.425043476 -0400
@@ -110,7 +110,7 @@ static inline bool setup_remapped_irq(in
 	return false;
 }
 
-int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
+static inline int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
 {
 	return -ENOSYS;
 }
diff -up linux-3.10.0-514.sdl7.i686/arch/x86/kernel/irq_32.c.morefixes linux-3.10.0-514.sdl7.i686/arch/x86/kernel/irq_32.c
--- linux-3.10.0-514.sdl7.i686/arch/x86/kernel/irq_32.c.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/arch/x86/kernel/irq_32.c	2016-11-05 09:49:59.371055125 -0400
@@ -24,9 +24,12 @@
 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 
+DEFINE_PER_CPU_SHARED_ALIGNED(rh_irq_cpustat_t, rh_irq_stat);
+
 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
 EXPORT_PER_CPU_SYMBOL(irq_regs);
 
+
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 
 int sysctl_panic_on_stackoverflow __read_mostly;
diff -up linux-3.10.0-514.sdl7.i686/include/linux/pps_kernel.h.morefixes linux-3.10.0-514.sdl7.i686/include/linux/pps_kernel.h
--- linux-3.10.0-514.sdl7.i686/include/linux/pps_kernel.h.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/include/linux/pps_kernel.h	2016-11-05 10:44:41.492049162 -0400
@@ -115,10 +115,17 @@ static inline void pps_get_ts(struct pps
 {
 	struct system_time_snapshot snap;
 	ktime_get_snapshot(&snap);
+#if defined CONFIG_X86_64
 	ts->ts_real = ktime_to_timespec64(snap.real);
 #ifdef CONFIG_NTP_PPS
 	ts->ts_raw = ktime_to_timespec64(snap.raw);
 #endif
+#else
+	ts->ts_real = ktime_to_timespec(snap.real);
+#ifdef CONFIG_NTP_PPS
+	ts->ts_raw = ktime_to_timespec(snap.raw);
+#endif
+#endif
 }
 
 /* Subtract known time delay from PPS event time(s) */
diff -up linux-3.10.0-514.sdl7.i686/kernel/hrtimer.c.morefixes linux-3.10.0-514.sdl7.i686/kernel/hrtimer.c
--- linux-3.10.0-514.sdl7.i686/kernel/hrtimer.c.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/kernel/hrtimer.c	2016-11-05 10:58:56.726065206 -0400
@@ -328,6 +328,7 @@ u64 ktime_divns(const ktime_t kt, s64 di
 
 	return dclc;
 }
+EXPORT_SYMBOL_GPL(ktime_divns);
 #endif /* BITS_PER_LONG >= 64 */
 
 /*
diff -up linux-3.10.0-514.sdl7.i686/mm/swap.c.morefixes linux-3.10.0-514.sdl7.i686/mm/swap.c
--- linux-3.10.0-514.sdl7.i686/mm/swap.c.morefixes	2016-10-19 10:16:25.000000000 -0400
+++ linux-3.10.0-514.sdl7.i686/mm/swap.c	2016-11-05 08:55:41.521061525 -0400
@@ -972,9 +972,6 @@ void release_pages(struct page **pages,
 		if (!put_page_testzero(page))
			continue;
 
-		VM_BUG_ON_PAGE(check_mmu_gather &&
-			       trans_huge_mmu_gather_count(page), page);
-
 		if (PageLRU(page)) {
 			if (!was_thp)
 				zone = zone_lru_lock(zone, page, &lock_batch,