
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflict resolution of af_smc.c from Stephen Rothwell.

Signed-off-by: David S. Miller <davem@davemloft.net>
Branch: master
Author: David S. Miller
Commit: 6b0a7f84ea
Changed files | lines changed
.clang-format | 24
Documentation/networking/rxrpc.txt | 16
MAINTAINERS | 17
Makefile | 2
arch/arm64/include/asm/futex.h | 16
arch/arm64/include/asm/module.h | 5
arch/arm64/kernel/ftrace.c | 3
arch/arm64/kernel/traps.c | 15
arch/mips/configs/generic/board-ocelot.config | 8
arch/mips/kernel/kgdb.c | 3
arch/mips/sgi-ip27/ip27-irq.c | 3
arch/powerpc/include/asm/mmu.h | 2
arch/powerpc/kernel/exceptions-64s.S | 12
arch/powerpc/kernel/head_32.S | 8
arch/powerpc/kernel/vdso32/gettimeofday.S | 2
arch/riscv/configs/rv32_defconfig | 84
arch/riscv/mm/init.c | 8
arch/sparc/kernel/pci_sun4v.c | 20
arch/x86/events/amd/core.c | 140
arch/x86/events/core.c | 13
arch/x86/events/intel/core.c | 8
arch/x86/include/asm/bitops.h | 41
arch/x86/include/asm/kvm_emulate.h | 4
arch/x86/include/asm/kvm_host.h | 17
arch/x86/include/uapi/asm/vmx.h | 1
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 6
arch/x86/kvm/emulate.c | 191
arch/x86/kvm/lapic.c | 4
arch/x86/kvm/mmu.c | 15
arch/x86/kvm/mmu.h | 2
arch/x86/kvm/pmu.c | 4
arch/x86/kvm/svm.c | 57
arch/x86/kvm/trace.h | 4
arch/x86/kvm/vmx/nested.c | 47
arch/x86/kvm/vmx/vmx.c | 35
arch/x86/kvm/vmx/vmx.h | 2
arch/x86/kvm/x86.c | 64
arch/x86/kvm/x86.h | 2
block/bfq-iosched.c | 15
block/bfq-iosched.h | 2
block/bfq-wf2q.c | 17
block/bio.c | 5
block/blk-mq.c | 7
drivers/acpi/acpica/nsobject.c | 4
drivers/acpi/nfit/core.c | 12
drivers/acpi/nfit/intel.c | 10
drivers/block/virtio_blk.c | 2
drivers/bluetooth/btusb.c | 2
drivers/char/ipmi/ipmi_dmi.c | 1
drivers/char/ipmi/ipmi_msghandler.c | 19
drivers/char/ipmi/ipmi_si_hardcode.c | 2
drivers/clk/at91/clk-programmable.c | 57
drivers/clk/at91/pmc.h | 2
drivers/clk/at91/sama5d2.c | 10
drivers/clk/imx/clk-pll14xx.c | 2
drivers/clk/mediatek/clk-gate.c | 3
drivers/clk/meson/clk-pll.c | 2
drivers/clk/meson/g12a.c | 6
drivers/clk/meson/gxbb.c | 2
drivers/clk/meson/vid-pll-div.c | 4
drivers/clk/x86/clk-pmc-atom.c | 14
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 13
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 3
drivers/gpu/drm/amd/amdkfd/kfd_device.c | 1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 23
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 34
drivers/gpu/drm/drm_atomic_helper.c | 5
drivers/gpu/drm/i915/gvt/dmabuf.c | 9
drivers/gpu/drm/i915/gvt/gtt.c | 12
drivers/gpu/drm/i915/gvt/kvmgt.c | 6
drivers/gpu/drm/i915/icl_dsi.c | 48
drivers/gpu/drm/i915/intel_ddi.c | 23
drivers/gpu/drm/i915/intel_display.c | 6
drivers/gpu/drm/i915/intel_dp.c | 69
drivers/gpu/drm/i915/intel_drv.h | 10
drivers/gpu/drm/i915/vlv_dsi.c | 24
drivers/gpu/drm/mediatek/mtk_dpi.c | 8
drivers/gpu/drm/mediatek/mtk_drm_drv.c | 7
drivers/gpu/drm/mediatek/mtk_drm_gem.c | 46
drivers/gpu/drm/mediatek/mtk_drm_gem.h | 3
drivers/gpu/drm/mediatek/mtk_hdmi.c | 2
drivers/gpu/drm/mediatek/mtk_hdmi_phy.c | 35
drivers/gpu/drm/mediatek/mtk_hdmi_phy.h | 5
drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c | 49
drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c | 23
drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c | 26
drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | 2
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | 9
drivers/gpu/drm/sun4i/sun8i_tcon_top.c | 5
drivers/gpu/drm/udl/udl_drv.c | 1
drivers/gpu/drm/udl/udl_drv.h | 1
drivers/gpu/drm/udl/udl_main.c | 8
drivers/gpu/host1x/hw/channel_hw.c | 2
drivers/infiniband/hw/hfi1/chip.c | 26
drivers/infiniband/hw/hfi1/qp.c | 4
drivers/infiniband/hw/hfi1/rc.c | 4
drivers/infiniband/hw/hfi1/tid_rdma.c | 31
drivers/infiniband/hw/hns/hns_roce_hem.c | 6
drivers/infiniband/hw/hns/hns_roce_mr.c | 4

.clang-format | 24

@ -78,6 +78,8 @@ ForEachMacros:
- 'ata_qc_for_each_with_internal'
- 'ax25_for_each'
- 'ax25_uid_for_each'
- '__bio_for_each_bvec'
- 'bio_for_each_bvec'
- 'bio_for_each_integrity_vec'
- '__bio_for_each_segment'
- 'bio_for_each_segment'
@ -118,10 +120,12 @@ ForEachMacros:
- 'drm_for_each_legacy_plane'
- 'drm_for_each_plane'
- 'drm_for_each_plane_mask'
- 'drm_for_each_privobj'
- 'drm_mm_for_each_hole'
- 'drm_mm_for_each_node'
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
- 'flow_action_for_each'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
- 'for_each_available_child_of_node'
@ -158,6 +162,9 @@ ForEachMacros:
- 'for_each_dss_dev'
- 'for_each_efi_memory_desc'
- 'for_each_efi_memory_desc_in_map'
- 'for_each_element'
- 'for_each_element_extid'
- 'for_each_element_id'
- 'for_each_endpoint_of_node'
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
@ -195,6 +202,7 @@ ForEachMacros:
- 'for_each_net_rcu'
- 'for_each_new_connector_in_state'
- 'for_each_new_crtc_in_state'
- 'for_each_new_mst_mgr_in_state'
- 'for_each_new_plane_in_state'
- 'for_each_new_private_obj_in_state'
- 'for_each_node'
@ -210,8 +218,10 @@ ForEachMacros:
- 'for_each_of_pci_range'
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
- 'for_each_old_mst_mgr_in_state'
- 'for_each_oldnew_connector_in_state'
- 'for_each_oldnew_crtc_in_state'
- 'for_each_oldnew_mst_mgr_in_state'
- 'for_each_oldnew_plane_in_state'
- 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
@ -243,6 +253,9 @@ ForEachMacros:
- 'for_each_sg_dma_page'
- 'for_each_sg_page'
- 'for_each_sibling_event'
- 'for_each_subelement'
- 'for_each_subelement_extid'
- 'for_each_subelement_id'
- '__for_each_thread'
- 'for_each_thread'
- 'for_each_zone'
@ -252,6 +265,8 @@ ForEachMacros:
- 'fwnode_for_each_child_node'
- 'fwnode_graph_for_each_endpoint'
- 'gadget_for_each_ep'
- 'genradix_for_each'
- 'genradix_for_each_from'
- 'hash_for_each'
- 'hash_for_each_possible'
- 'hash_for_each_possible_rcu'
@ -293,7 +308,11 @@ ForEachMacros:
- 'key_for_each'
- 'key_for_each_safe'
- 'klp_for_each_func'
- 'klp_for_each_func_safe'
- 'klp_for_each_func_static'
- 'klp_for_each_object'
- 'klp_for_each_object_safe'
- 'klp_for_each_object_static'
- 'kvm_for_each_memslot'
- 'kvm_for_each_vcpu'
- 'list_for_each'
@ -324,6 +343,8 @@ ForEachMacros:
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
- 'mp_bvec_for_each_page'
- 'mp_bvec_for_each_segment'
- 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
@ -375,6 +396,7 @@ ForEachMacros:
- 'rht_for_each_rcu'
- 'rht_for_each_rcu_from'
- '__rq_for_each_bio'
- 'rq_for_each_bvec'
- 'rq_for_each_segment'
- 'scsi_for_each_prot_sg'
- 'scsi_for_each_sg'
@ -410,6 +432,8 @@ ForEachMacros:
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
- 'xa_for_each'
- 'xa_for_each_marked'
- 'xa_for_each_start'
- 'xas_for_each'
- 'xas_for_each_conflict'
- 'xas_for_each_marked'

Documentation/networking/rxrpc.txt | 16

@ -1009,16 +1009,18 @@ The kernel interface functions are as follows:
(*) Check call still alive.
u32 rxrpc_kernel_check_life(struct socket *sock,
struct rxrpc_call *call);
bool rxrpc_kernel_check_life(struct socket *sock,
struct rxrpc_call *call,
u32 *_life);
void rxrpc_kernel_probe_life(struct socket *sock,
struct rxrpc_call *call);
The first function returns a number that is updated when ACKs are received
from the peer (notably including PING RESPONSE ACKs which we can elicit by
sending PING ACKs to see if the call still exists on the server). The
caller should compare the numbers of two calls to see if the call is still
alive after waiting for a suitable interval.
The first function passes back in *_life a number that is updated when
ACKs are received from the peer (notably including PING RESPONSE ACKs
which we can elicit by sending PING ACKs to see if the call still exists
on the server). The caller should compare the numbers of two calls to see
if the call is still alive after waiting for a suitable interval. It also
returns true as long as the call hasn't yet reached the completed state.
This allows the caller to work out if the server is still contactable and
if the call is still alive on the server while waiting for the server to
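
As a rough illustration of the updated interface, a kernel-side caller could poll the call as sketched below. The two rxrpc calls follow the prototypes quoted above; the wrapper function name, the one-second interval and the use of msleep() from <linux/delay.h> are illustrative assumptions rather than part of the patch.

	/* Illustrative sketch only, not from the patch. */
	static bool example_call_seems_alive(struct socket *sock, struct rxrpc_call *call)
	{
		u32 life_before, life_after;

		/* A false return means the call already reached the completed state. */
		if (!rxrpc_kernel_check_life(sock, call, &life_before))
			return false;

		/* Ask the peer for a response ACK so the life counter can advance. */
		rxrpc_kernel_probe_life(sock, call);

		msleep(1000);	/* "a suitable interval"; the length is an assumption */

		if (!rxrpc_kernel_check_life(sock, call, &life_after))
			return false;

		/* A changed counter means an ACK arrived, so the peer is still responding. */
		return life_after != life_before;
	}

Whatever timeout machinery the caller already runs can drive the same comparison; the essential change is that the life counter is now passed back through the *_life argument while the return value reports whether the call has completed.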

MAINTAINERS | 17

@ -10139,7 +10139,7 @@ F: drivers/spi/spi-at91-usart.c
F: Documentation/devicetree/bindings/mfd/atmel-usart.txt
MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
M: Woojung Huh <Woojung.Huh@microchip.com>
M: Woojung Huh <woojung.huh@microchip.com>
M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
L: netdev@vger.kernel.org
S: Maintained
@ -16503,7 +16503,7 @@ F: drivers/char/virtio_console.c
F: include/linux/virtio_console.h
F: include/uapi/linux/virtio_console.h
VIRTIO CORE, NET AND BLOCK DRIVERS
VIRTIO CORE AND NET DRIVERS
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
L: virtualization@lists.linux-foundation.org
@ -16518,6 +16518,19 @@ F: include/uapi/linux/virtio_*.h
F: drivers/crypto/virtio/
F: mm/balloon_compaction.c
VIRTIO BLOCK AND SCSI DRIVERS
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
R: Paolo Bonzini <pbonzini@redhat.com>
R: Stefan Hajnoczi <stefanha@redhat.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
F: drivers/block/virtio_blk.c
F: drivers/scsi/virtio_scsi.c
F: include/uapi/linux/virtio_blk.h
F: include/uapi/linux/virtio_scsi.h
F: drivers/vhost/scsi.c
VIRTIO CRYPTO DRIVER
M: Gonglei <arei.gonglei@huawei.com>
L: virtualization@lists.linux-foundation.org

Makefile | 2

@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 1
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Shy Crocodile
# *DOCUMENTATION*

arch/arm64/include/asm/futex.h | 16

@ -30,8 +30,8 @@ do { \
" prfm pstl1strm, %2\n" \
"1: ldxr %w1, %2\n" \
insn "\n" \
"2: stlxr %w3, %w0, %2\n" \
" cbnz %w3, 1b\n" \
"2: stlxr %w0, %w3, %2\n" \
" cbnz %w0, 1b\n" \
" dmb ish\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
@ -50,30 +50,30 @@ do { \
static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
{
int oldval = 0, ret, tmp;
int oldval, ret, tmp;
u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("mov %w0, %w4",
__futex_atomic_op("mov %w3, %w4",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op("add %w0, %w1, %w4",
__futex_atomic_op("add %w3, %w1, %w4",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("orr %w0, %w1, %w4",
__futex_atomic_op("orr %w3, %w1, %w4",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("and %w0, %w1, %w4",
__futex_atomic_op("and %w3, %w1, %w4",
ret, oldval, uaddr, tmp, ~oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("eor %w0, %w1, %w4",
__futex_atomic_op("eor %w3, %w1, %w4",
ret, oldval, uaddr, tmp, oparg);
break;
default:

arch/arm64/include/asm/module.h | 5

@ -73,4 +73,9 @@ static inline bool is_forbidden_offset_for_adrp(void *place)
struct plt_entry get_plt_entry(u64 dst, void *pc);
bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
static inline bool plt_entry_is_initialized(const struct plt_entry *e)
{
return e->adrp || e->add || e->br;
}
#endif /* __ASM_MODULE_H */

arch/arm64/kernel/ftrace.c | 3

@ -107,8 +107,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
if (!plt_entries_equal(mod->arch.ftrace_trampoline,
&trampoline)) {
if (!plt_entries_equal(mod->arch.ftrace_trampoline,
&(struct plt_entry){})) {
if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
return -EINVAL;
}

arch/arm64/kernel/traps.c | 15

@ -102,10 +102,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
int skip;
int skip = 0;
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
if (regs) {
if (user_mode(regs))
return;
skip = 1;
}
if (!tsk)
tsk = current;
@ -126,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.graph = 0;
#endif
skip = !!regs;
printk("Call trace:\n");
do {
/* skip until specified stack frame */
@ -176,15 +181,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
return ret;
print_modules();
__show_regs(regs);
pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
end_of_stack(tsk));
show_regs(regs);
if (!user_mode(regs)) {
dump_backtrace(regs, tsk);
if (!user_mode(regs))
dump_instr(KERN_EMERG, regs);
}
return ret;
}

arch/mips/configs/generic/board-ocelot.config | 8

@ -1,6 +1,10 @@
# require CONFIG_CPU_MIPS32_R2=y
CONFIG_LEGACY_BOARD_OCELOT=y
CONFIG_FIT_IMAGE_FDT_OCELOT=y
CONFIG_BRIDGE=y
CONFIG_GENERIC_PHY=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@ -19,6 +23,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_NETDEVICES=y
CONFIG_NET_SWITCHDEV=y
CONFIG_NET_DSA=y
CONFIG_MSCC_OCELOT_SWITCH=y
CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y
CONFIG_MDIO_MSCC_MIIM=y
@ -35,6 +41,8 @@ CONFIG_SPI_DESIGNWARE=y
CONFIG_SPI_DW_MMIO=y
CONFIG_SPI_SPIDEV=y
CONFIG_PINCTRL_OCELOT=y
CONFIG_GPIO_SYSFS=y
CONFIG_POWER_RESET=y

arch/mips/kernel/kgdb.c | 3

@ -33,6 +33,7 @@
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <linux/uaccess.h>
#include <asm/irq_regs.h>
static struct hard_trap_info {
unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
@ -214,7 +215,7 @@ void kgdb_call_nmi_hook(void *ignored)
old_fs = get_fs();
set_fs(KERNEL_DS);
kgdb_nmicallback(raw_smp_processor_id(), NULL);
kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
set_fs(old_fs);
}

arch/mips/sgi-ip27/ip27-irq.c | 3

@ -118,7 +118,6 @@ static void shutdown_bridge_irq(struct irq_data *d)
{
struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
struct bridge_controller *bc;
int pin = hd->pin;
if (!hd)
return;
@ -126,7 +125,7 @@ static void shutdown_bridge_irq(struct irq_data *d)
disable_hub_irq(d);
bc = hd->bc;
bridge_clr(bc, b_int_enable, (1 << pin));
bridge_clr(bc, b_int_enable, (1 << hd->pin));
bridge_read(bc, b_wid_tflush);
}

arch/powerpc/include/asm/mmu.h | 2

@ -352,7 +352,7 @@ static inline bool strict_kernel_rwx_enabled(void)
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \
defined (CONFIG_PPC_64K_PAGES)
#define MAX_PHYSMEM_BITS 51
#elif defined(CONFIG_SPARSEMEM)
#elif defined(CONFIG_PPC64)
#define MAX_PHYSMEM_BITS 46
#endif

arch/powerpc/kernel/exceptions-64s.S | 12

@ -656,11 +656,17 @@ EXC_COMMON_BEGIN(data_access_slb_common)
ld r4,PACA_EXSLB+EX_DAR(r13)
std r4,_DAR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
bl do_slb_fault
cmpdi r3,0
bne- 1f
b fast_exception_return
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
@ -705,11 +711,17 @@ EXC_COMMON_BEGIN(instruction_access_slb_common)
EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
ld r4,_NIP(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
bl do_slb_fault
cmpdi r3,0
bne- 1f
b fast_exception_return
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)

arch/powerpc/kernel/head_32.S | 8

@ -851,10 +851,6 @@ __secondary_start:
tophys(r4,r2)
addi r4,r4,THREAD /* phys address of our thread_struct */
mtspr SPRN_SPRG_THREAD,r4
#ifdef CONFIG_PPC_RTAS
li r3,0
stw r3, RTAS_SP(r4) /* 0 => not in RTAS */
#endif
lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
mtspr SPRN_SPRG_PGDIR, r4
@ -941,10 +937,6 @@ start_here:
tophys(r4,r2)
addi r4,r4,THREAD /* init task's THREAD */
mtspr SPRN_SPRG_THREAD,r4
#ifdef CONFIG_PPC_RTAS
li r3,0
stw r3, RTAS_SP(r4) /* 0 => not in RTAS */
#endif
lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
mtspr SPRN_SPRG_PGDIR, r4

arch/powerpc/kernel/vdso32/gettimeofday.S | 2

@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
* can be used, r7 contains NSEC_PER_SEC.
*/
lwz r5,WTOM_CLOCK_SEC(r9)
lwz r5,(WTOM_CLOCK_SEC+LOPART)(r9)
lwz r6,WTOM_CLOCK_NSEC(r9)
/* We now have our offset in r5,r6. We create a fake dependency

arch/riscv/configs/rv32_defconfig | 84

@ -0,0 +1,84 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_CGROUP_BPF=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_BPF_SYSCALL=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NETLINK_DIAG=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCIE_XILINX=y
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
CONFIG_NETDEVICES=y
CONFIG_VIRTIO_NET=y
CONFIG_MACB=y
CONFIG_E1000E=y
CONFIG_R8169=y
CONFIG_MICROSEMI_PHY=y
CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_HVC_RISCV_SBI=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PLATFORM=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
CONFIG_VIRTIO_MMIO=y
CONFIG_SIFIVE_PLIC=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_AUTOFS4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_PRINTK_TIME=y
# CONFIG_RCU_TRACE is not set

arch/riscv/mm/init.c | 8

@ -121,6 +121,14 @@ void __init setup_bootmem(void)
*/
memblock_reserve(reg->base, vmlinux_end - reg->base);
mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
/*
* Remove memblock from the end of usable area to the
* end of region
*/
if (reg->base + mem_size < end)
memblock_remove(reg->base + mem_size,
end - reg->base - mem_size);
}
}
BUG_ON(mem_size == 0);

arch/sparc/kernel/pci_sun4v.c | 20

@ -73,6 +73,11 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
p->npages = 0;
}
static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
{
return iommu->atu && mask > DMA_BIT_MASK(32);
}
/* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
@ -92,7 +97,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
while (npages != 0) {
if (mask <= DMA_BIT_MASK(32) || !pbm->iommu->atu) {
if (!iommu_use_atu(pbm->iommu, mask)) {
num = pci_sun4v_iommu_map(devhandle,
HV_PCI_TSBID(0, entry),
npages,
@ -179,7 +184,6 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
unsigned long flags, order, first_page, npages, n;
unsigned long prot = 0;
struct iommu *iommu;
struct atu *atu;
struct iommu_map_table *tbl;
struct page *page;
void *ret;
@ -205,13 +209,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu;
atu = iommu->atu;
mask = dev->coherent_dma_mask;
if (mask <= DMA_BIT_MASK(32) || !atu)
if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl;
else
tbl = &atu->tbl;
tbl = &iommu->atu->tbl;
entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0);
@ -333,7 +335,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
atu = iommu->atu;
devhandle = pbm->devhandle;
if (dvma <= DMA_BIT_MASK(32)) {
if (!iommu_use_atu(iommu, dvma)) {
tbl = &iommu->tbl;
iotsb_num = 0; /* we don't care for legacy iommu */
} else {
@ -374,7 +376,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
npages >>= IO_PAGE_SHIFT;
mask = *dev->dma_mask;
if (mask <= DMA_BIT_MASK(32))
if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl;
else
tbl = &atu->tbl;
@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
mask = *dev->dma_mask;
if (mask <= DMA_BIT_MASK(32))
if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl;
else
tbl = &atu->tbl;

arch/x86/events/amd/core.c | 140

@ -3,10 +3,14 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/apicdef.h>
#include <asm/nmi.h>
#include "../perf_event.h"
static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
static __initconst const u64 amd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@ -429,6 +433,132 @@ static void amd_pmu_cpu_dead(int cpu)
}
}
/*
* When a PMC counter overflows, an NMI is used to process the event and
* reset the counter. NMI latency can result in the counter being updated
* before the NMI can run, which can result in what appear to be spurious
* NMIs. This function is intended to wait for the NMI to run and reset
* the counter to avoid possible unhandled NMI messages.
*/
#define OVERFLOW_WAIT_COUNT 50
static void amd_pmu_wait_on_overflow(int idx)
{
unsigned int i;
u64 counter;
/*
* Wait for the counter to be reset if it has overflowed. This loop
* should exit very, very quickly, but just in case, don't wait
* forever...
*/
for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
rdmsrl(x86_pmu_event_addr(idx), counter);
if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
break;
/* Might be in IRQ context, so can't sleep */
udelay(1);
}
}
static void amd_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;
x86_pmu_disable_all();
/*
* This shouldn't be called from NMI context, but add a safeguard here
* to return, since if we're in NMI context we can't wait for an NMI
* to reset an overflowed counter value.
*/
if (in_nmi())
return;
/*
* Check each counter for overflow and wait for it to be reset by the
* NMI if it has overflowed. This relies on the fact that all active
* counters are always enabled when this function is called and
* ARCH_PERFMON_EVENTSEL_INT is always set.
*/
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;
amd_pmu_wait_on_overflow(idx);
}
}
static void amd_pmu_disable_event(struct perf_event *event)
{
x86_pmu_disable_event(event);
/*
* This can be called from NMI context (via x86_pmu_stop). The counter
* may have overflowed, but either way, we'll never see it get reset
* by the NMI if we're already in the NMI. And the NMI latency support
* below will take care of any pending NMI that might have been
* generated by the overflow.
*/
if (in_nmi())
return;
amd_pmu_wait_on_overflow(event->hw.idx);
}
/*
* Because of NMI latency, if multiple PMC counters are active or other sources
* of NMIs are received, the perf NMI handler can handle one or more overflowed
* PMC counters outside of the NMI associated with the PMC overflow. If the NMI
* doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
* back-to-back NMI support won't be active. This PMC handler needs to take into
* account that this can occur, otherwise this could result in unknown NMI
* messages being issued. Examples of this include PMC overflow while in the NMI
* handler when multiple PMCs are active or PMC overflow while handling some
* other source of an NMI.
*
* Attempt to mitigate this by using the number of active PMCs to determine
* whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
* any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
* number of active PMCs or 2. The value of 2 is used in case an NMI does not
* arrive at the LAPIC in time to be collapsed into an already pending NMI.
*/
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int active, handled;
/*
* Obtain the active count before calling x86_pmu_handle_irq() since
* it is possible that x86_pmu_handle_irq() may make a counter
* inactive (through x86_pmu_stop).
*/
active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
/* Process any counter overflows */
handled = x86_pmu_handle_irq(regs);
/*
* If a counter was handled, record the number of possible remaining
* NMIs that can occur.
*/
if (handled) {
this_cpu_write(perf_nmi_counter,
min_t(unsigned int, 2, active));
return handled;
}
if (!this_cpu_read(perf_nmi_counter))
return NMI_DONE;
this_cpu_dec(perf_nmi_counter);
return NMI_HANDLED;
}
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
@ -621,11 +751,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
static __initconst const struct x86_pmu amd_pmu = {
.name = "AMD",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.handle_irq = amd_pmu_handle_irq,
.disable_all = amd_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event,
.disable = amd_pmu_disable_event,
.hw_config = amd_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_K7_EVNTSEL0,
@ -732,7 +862,7 @@ void amd_pmu_enable_virt(void)
cpuc->perf_ctr_virt_mask = 0;
/* Reload all events */
x86_pmu_disable_all();
amd_pmu_disable_all();
x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
@ -750,7 +880,7 @@ void amd_pmu_disable_virt(void)
cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
/* Reload all events */
x86_pmu_disable_all();
amd_pmu_disable_all();
x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);

arch/x86/events/core.c | 13

@ -1349,8 +1349,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
if (test_bit(hwc->idx, cpuc->active_mask)) {
x86_pmu.disable(event);
__clear_bit(hwc->idx, cpuc->active_mask);
cpuc->events[hwc->idx] = NULL;
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
@ -1447,16 +1448,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
apic_write(APIC_LVTPC, APIC_DM_NMI);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask)) {
/*
* Though we deactivated the counter some cpus
* might still deliver spurious interrupts still
* in flight. Catch them:
*/
if (__test_and_clear_bit(idx, cpuc->running))
handled++;
if (!test_bit(idx, cpuc->active_mask))
continue;
}
event = cpuc->events[idx];

arch/x86/events/intel/core.c | 8

@ -3185,7 +3185,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
return ret;
if (event->attr.precise_ip) {
if (!event->attr.freq) {
if (!(event->attr.freq || event->attr.wakeup_events)) {
event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
if (!(event->attr.sample_type &
~intel_pmu_large_pebs_flags(event)))
@ -3575,6 +3575,12 @@ static void intel_pmu_cpu_starting(int cpu)
cpuc->lbr_sel = NULL;
if (x86_pmu.flags & PMU_FL_TFA) {
WARN_ON_ONCE(cpuc->tfa_shadow);
cpuc->tfa_shadow = ~0ULL;
intel_set_tfa(cpuc, false);
}
if (x86_pmu.version > 1)
flip_smm_bit(&x86_pmu.attr_freeze_on_smi);

arch/x86/include/asm/bitops.h | 41

@ -36,16 +36,17 @@
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#define RLONG_ADDR(x) "m" (*(volatile long *) (x))
#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x))
#define ADDR BITOP_ADDR(addr)
#define ADDR RLONG_ADDR(addr)
/*
* We do the locked ops that don't return the old value as
* a mask operation on a byte.
*/
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr) (1 << ((nr) & 7))
/**
@ -73,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
: "memory");
} else {
asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
@ -88,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
*/
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
/**
@ -110,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
: "iq" ((u8)~CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
: BITOP_ADDR(addr)
: "Ir" (nr));
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
@ -131,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
@ -139,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
bool negative;
asm volatile(LOCK_PREFIX "andb %2,%1"
CC_SET(s)
: CC_OUT(s) (negative), ADDR
: CC_OUT(s) (negative), WBYTE_ADDR(addr)
: "ir" ((char) ~(1 << nr)) : "memory");
return negative;
}
@ -155,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
* __clear_bit() is non-atomic and implies release semantics before the memory
* operation. It can be used for an unlock if no other CPUs can concurrently
* modify other bits in the word.
*
* No memory barrier is required here, because x86 cannot reorder stores past
* older loads. Same principle as spin_unlock.
*/
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
}
@ -176,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
*/
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
/**
@ -196,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
: "iq" ((u8)CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
: BITOP_ADDR(addr)
: "Ir" (nr));
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
@ -242,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
asm(__ASM_SIZE(bts) " %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit), ADDR
: "Ir" (nr));
: CC_OUT(c) (oldbit)
: ADDR, "Ir" (nr) : "memory");
return oldbit;
}
@ -282,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
asm volatile(__ASM_SIZE(btr) " %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit), ADDR
: "Ir" (nr));
: CC_OUT(c) (oldbit)
: ADDR, "Ir" (nr) : "memory");
return oldbit;
}
@ -294,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
asm volatile(__ASM_SIZE(btc) " %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit), ADDR
: "Ir" (nr) : "memory");
: CC_OUT(c) (oldbit)
: ADDR, "Ir" (nr) : "memory");
return oldbit;
}
@ -326,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
asm volatile(__ASM_SIZE(bt) " %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr));
: "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
return oldbit;
}

arch/x86/include/asm/kvm_emulate.h | 4

@ -226,7 +226,9 @@ struct x86_emulate_ops {
unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase);
int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
const char *smstate);
void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
};

arch/x86/include/asm/kvm_host.h | 17

@ -126,7 +126,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
}
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
@ -844,9 +844,9 @@ enum kvm_irqchip_mode {
};
struct kvm_arch {
unsigned int n_used_mmu_pages;
unsigned int n_requested_mmu_pages;
unsigned int n_max_mmu_pages;
unsigned long n_used_mmu_pages;
unsigned long n_requested_mmu_pages;
unsigned long n_max_mmu_pages;
unsigned int indirect_shadow_pages;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
@ -1182,7 +1182,7 @@ struct kvm_x86_ops {
int (*smi_allowed)(struct kvm_vcpu *vcpu);
int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
int (*enable_smi_window)(struct kvm_vcpu *vcpu);
int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
@ -1256,8 +1256,8 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);
@ -1592,4 +1592,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
#define put_smstate(type, buf, offset, val) \
*(type *)((buf) + (offset) - 0x7e00) = val
#define GET_SMSTATE(type, buf, offset) \
(*(type *)((buf) + (offset) - 0x7e00))
#endif /* _ASM_X86_KVM_HOST_H */

arch/x86/include/uapi/asm/vmx.h | 1

@ -146,6 +146,7 @@
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
#define VMX_ABORT_VMCS_CORRUPTED 3
#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
#endif /* _UAPIVMX_H */

arch/x86/kernel/cpu/resctrl/rdtgroup.c | 6

@ -2039,14 +2039,14 @@ out:
enum rdt_param {
Opt_cdp,
Opt_cdpl2,
Opt_mba_mpbs,
Opt_mba_mbps,
nr__rdt_params
};
static const struct fs_parameter_spec rdt_param_specs[] = {
fsparam_flag("cdp", Opt_cdp),
fsparam_flag("cdpl2", Opt_cdpl2),
fsparam_flag("mba_mpbs", Opt_mba_mpbs),
fsparam_flag("mba_MBps", Opt_mba_mbps),
{}
};
@ -2072,7 +2072,7 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
case Opt_cdpl2:
ctx->enable_cdpl2 = true;
return 0;
case Opt_mba_mpbs:
case Opt_mba_mbps:
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -EINVAL;
ctx->enable_mba_mbps = true;

arch/x86/kvm/emulate.c | 191

@ -2331,24 +2331,18 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
u32 eax, ebx, ecx, edx;
eax = 0x80000001;
ecx = 0;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
return edx & bit(X86_FEATURE_LM);
#else
return false;
#endif
}
#define GET_SMSTATE(type, smbase, offset) \
({ \
type __val; \
int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
sizeof(__val)); \
if (r != X86EMUL_CONTINUE) \
return X86EMUL_UNHANDLEABLE; \
__val; \
})
static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
desc->g = (flags >> 23) & 1;
@ -2361,27 +2355,30 @@ static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
desc->type = (flags >> 8) & 15;
}
static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
int n)
{
struct desc_struct desc;
int offset;
u16 selector;
selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
if (n < 3)
offset = 0x7f84 + n * 12;
else
offset = 0x7f2c + (n - 3) * 12;
set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
return X86EMUL_CONTINUE;
}
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
#ifdef CONFIG_X86_64
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
int n)
{
struct desc_struct desc;
int offset;
@ -2390,15 +2387,16 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
offset = 0x7e00 + n * 16;
selector = GET_SMSTATE(u16, smbase, offset);
rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
base3 = GET_SMSTATE(u32, smbase, offset + 12);
selector = GET_SMSTATE(u16, smstate, offset);
rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
base3 = GET_SMSTATE(u32, smstate, offset + 12);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
return X86EMUL_CONTINUE;
}
#endif
static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
u64 cr0, u64 cr3, u64 cr4)
@ -2445,7 +2443,8 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
return X86EMUL_CONTINUE;
}
static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
const char *smstate)
{
struct desc_struct desc;
struct desc_ptr dt;
@ -2453,53 +2452,55 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
u32 val, cr0, cr3, cr4;
int i;
cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
cr3 = GET_SMSTATE(u32, smbase, 0x7ff8);
ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
for (i = 0; i < 8; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
val = GET_SMSTATE(u32, smbase, 0x7fcc);
val = GET_SMSTATE(u32, smstate, 0x7fcc);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
val = GET_SMSTATE(u32, smbase, 0x7fc8);
val = GET_SMSTATE(u32, smstate, 0x7fc8);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
selector = GET_SMSTATE(u32, smbase, 0x7fc4);
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
selector = GET_SMSTATE(u32, smstate, 0x7fc4);
set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
selector = GET_SMSTATE(u32, smbase, 0x7fc0);
set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
selector = GET_SMSTATE(u32, smstate, 0x7fc0);
set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
ctxt->ops->set_gdt(ctxt, &dt);
dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
ctxt->ops->set_idt(ctxt, &dt);
for (i = 0; i < 6; i++) {
int r = rsm_load_seg_32(ctxt, smbase, i);
int r = rsm_load_seg_32(ctxt, smstate, i);
if (r != X86EMUL_CONTINUE)
return r;
}
cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
}