author    Kernel Build Daemon <kbuild@suse.de>  2019-07-18 07:14:02 +0200
committer Kernel Build Daemon <kbuild@suse.de>  2019-07-18 07:14:02 +0200
commit    86af67220adebde85e24ae636a9caab94bb89c38
tree      55c50c658011f68d26e4d57bf2d46548d42ab405
parent    b96b1f3e2161a59cf5a0611eafd343fc31db358a
parent    1395f17cf1f5ef7fec72efd332ff8b1da2b4d9a1

Merge branch 'SLE15-SP1' into SLE15-SP1-AZURE
-rw-r--r--  Makefile | 4
-rw-r--r--  arch/alpha/include/asm/topology.h | 3
-rw-r--r--  arch/arm64/crypto/sha1-ce-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sha2-ce-glue.c | 2
-rw-r--r--  arch/ia64/kernel/numa.c | 2
-rw-r--r--  arch/ia64/mm/discontig.c | 6
-rw-r--r--  arch/powerpc/crypto/crc-vpmsum_test.c | 10
-rw-r--r--  arch/powerpc/include/asm/hvcall.h | 10
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 3
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 15
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 9
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 3
-rw-r--r--  arch/powerpc/mm/numa.c | 40
-rw-r--r--  arch/powerpc/platforms/pseries/papr_scm.c | 44
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 5
-rw-r--r--  arch/s390/include/asm/lowcore.h | 8
-rw-r--r--  arch/s390/kernel/smp.c | 3
-rw-r--r--  arch/s390/kernel/vtime.c | 19
-rw-r--r--  arch/s390/kvm/Kconfig | 1
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 17
-rw-r--r--  arch/sparc/kernel/pci_fire.c | 3
-rw-r--r--  arch/sparc/kernel/pci_schizo.c | 3
-rw-r--r--  arch/sparc/kernel/psycho_common.c | 3
-rw-r--r--  arch/sparc/kernel/sbus.c | 3
-rw-r--r--  arch/sparc/mm/init_64.c | 6
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 18
-rw-r--r--  arch/x86/include/asm/pci.h | 3
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 7
-rw-r--r--  arch/x86/kernel/smpboot.c | 3
-rw-r--r--  arch/x86/kvm/mmu.c | 13
-rw-r--r--  arch/x86/kvm/mmu.h | 2
-rw-r--r--  arch/x86/kvm/svm.c | 11
-rw-r--r--  arch/x86/kvm/x86.c | 4
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 91
-rw-r--r--  crypto/chacha20poly1305.c | 30
-rw-r--r--  crypto/ghash-generic.c | 8
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 5
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c | 12
-rw-r--r--  drivers/cpufreq/cpufreq.c | 18
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 23
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c | 19
-rw-r--r--  drivers/cpufreq/pasemi-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/pmac32-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes.c | 8
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 96
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 2
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 15
-rw-r--r--  drivers/crypto/talitos.c | 72
-rw-r--r--  drivers/dma/dmaengine.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 6
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 2
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 5
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c | 10
-rw-r--r--  drivers/gpu/drm/meson/meson_plane.c | 16
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 24
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 10
-rw-r--r--  drivers/hid/wacom_sys.c | 13
-rw-r--r--  drivers/hid/wacom_wac.c | 23
-rw-r--r--  drivers/hid/wacom_wac.h | 1
-rw-r--r--  drivers/hwtracing/intel_th/msu.c | 2
-rw-r--r--  drivers/iommu/dmar.c | 5
-rw-r--r--  drivers/iommu/intel-iommu.c | 3
-rw-r--r--  drivers/md/bcache/alloc.c | 14
-rw-r--r--  drivers/md/bcache/bcache.h | 6
-rw-r--r--  drivers/md/bcache/bset.c | 77
-rw-r--r--  drivers/md/bcache/bset.h | 34
-rw-r--r--  drivers/md/bcache/btree.c | 55
-rw-r--r--  drivers/md/bcache/btree.h | 2
-rw-r--r--  drivers/md/bcache/io.c | 12
-rw-r--r--  drivers/md/bcache/journal.c | 179
-rw-r--r--  drivers/md/bcache/journal.h | 4
-rw-r--r--  drivers/md/bcache/request.c | 41
-rw-r--r--  drivers/md/bcache/request.h | 2
-rw-r--r--  drivers/md/bcache/super.c | 297
-rw-r--r--  drivers/md/bcache/sysfs.c | 76
-rw-r--r--  drivers/md/bcache/util.h | 28
-rw-r--r--  drivers/md/bcache/writeback.c | 8
-rw-r--r--  drivers/media/platform/marvell-ccic/mcam-core.c | 5
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_pm.c | 5
-rw-r--r--  drivers/media/platform/vivid/vivid-osd.c | 2
-rw-r--r--  drivers/media/usb/cpia2/cpia2_usb.c | 3
-rw-r--r--  drivers/media/v4l2-core/v4l2-ctrls.c | 9
-rw-r--r--  drivers/memory/tegra/mc.c | 2
-rw-r--r--  drivers/memstick/core/memstick.c | 13
-rw-r--r--  drivers/mfd/intel-lpss.c | 1
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c | 7
-rw-r--r--  drivers/net/bonding/bond_options.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 6
-rw-r--r--  drivers/net/ethernet/seeq/sgiseeq.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 3
-rw-r--r--  drivers/net/ppp/ppp_deflate.c | 20
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/tun.c | 7
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath10k/sdio.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath6kl/wmi.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 6
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 39
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 1
-rw-r--r--  drivers/net/wireless/intersil/p54/p54usb.c | 43
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/fw.h | 16
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/scan.c | 18
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 4
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/wmm.c | 2
-rw-r--r--  drivers/pci/p2pdma.c | 10
-rw-r--r--  drivers/pci/pci-driver.c | 16
-rw-r--r--  drivers/platform/x86/Kconfig | 2
-rw-r--r--  drivers/platform/x86/Makefile | 1
-rw-r--r--  drivers/platform/x86/intel_speed_select_if/Kconfig | 17
-rw-r--r--  drivers/platform/x86/intel_speed_select_if/Makefile | 10
-rw-r--r--  drivers/platform/x86/intel_speed_select_if/isst_if_common.c | 672
-rw-r--r--  drivers/platform/x86/intel_speed_select_if/isst_if_common.h | 69
-rw-r--r--  drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c | 216
-rw-r--r--  drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c | 214
-rw-r--r--  drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c | 180
-rw-r--r--  drivers/platform/x86/intel_turbo_max_3.c | 3
-rw-r--r--  drivers/platform/x86/pmc_atom.c | 8
-rw-r--r--  drivers/s390/block/dasd_fba.c | 21
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 33
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 38
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 120
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h | 15
-rw-r--r--  drivers/staging/comedi/drivers/amplc_pci230.c | 3
-rw-r--r--  drivers/staging/comedi/drivers/dt282x.c | 3
-rw-r--r--  drivers/staging/rtl8712/rtl871x_ioctl_linux.c | 157
-rw-r--r--  drivers/tty/rocket.c | 2
-rw-r--r--  drivers/tty/serial/8250/8250_port.c | 3
-rw-r--r--  drivers/tty/serial/cpm_uart/cpm_uart_core.c | 17
-rw-r--r--  drivers/tty/serial/serial_core.c | 7
-rw-r--r--  drivers/tty/serial/xilinx_uartps.c | 11
-rw-r--r--  drivers/usb/gadget/function/u_ether.c | 6
-rw-r--r--  drivers/usb/gadget/udc/fusb300_udc.c | 5
-rw-r--r--  drivers/usb/gadget/udc/lpc32xx_udc.c | 3
-rw-r--r--  fs/btrfs/ctree.c | 57
-rw-r--r--  fs/btrfs/ctree.h | 2
-rw-r--r--  fs/btrfs/delayed-inode.c | 3
-rw-r--r--  fs/ocfs2/dlmglue.c | 82
-rw-r--r--  fs/ocfs2/ocfs2.h | 4
-rw-r--r--  include/linux/cgroup.h | 10
-rw-r--r--  include/linux/cpufreq.h | 12
-rw-r--r--  include/linux/device.h | 2
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/kvm_host.h | 10
-rw-r--r--  include/linux/mm.h | 5
-rw-r--r--  include/uapi/linux/btf.h | 2
-rw-r--r--  include/uapi/linux/isst_if.h | 172
-rw-r--r--  init/init_task.c | 1
-rw-r--r--  kernel/kthread.c | 3
-rw-r--r--  kernel/livepatch/transition.c | 11
-rw-r--r--  kernel/memremap.c | 2
-rw-r--r--  kernel/sched/fair.c | 15
-rw-r--r--  kernel/trace/bpf_trace.c | 100
-rw-r--r--  lib/bitmap.c | 2
-rw-r--r--  lib/cpumask.c | 3
-rw-r--r--  lib/scatterlist.c | 9
-rw-r--r--  mm/huge_memory.c | 13
-rw-r--r--  mm/memory.c | 7
-rw-r--r--  mm/memory_hotplug.c | 4
-rw-r--r--  mm/mempolicy.c | 2
-rw-r--r--  mm/page_alloc.c | 11
-rw-r--r--  mm/page_ext.c | 2
-rw-r--r--  net/batman-adv/translation-table.c | 2
-rw-r--r--  net/bridge/br_if.c | 13
-rw-r--r--  net/core/dev.c | 4
-rw-r--r--  net/core/ethtool.c | 12
-rw-r--r--  net/core/pktgen.c | 14
-rw-r--r--  net/core/rtnetlink.c | 9
-rw-r--r--  net/ipv4/igmp.c | 47
-rw-r--r--  net/ipv4/raw.c | 6
-rw-r--r--  net/ipv4/tcp_input.c | 4
-rw-r--r--  net/ipv6/raw.c | 2
-rw-r--r--  net/ipv6/sit.c | 2
-rw-r--r--  net/llc/llc_output.c | 2
-rw-r--r--  net/mac80211/ieee80211_i.h | 9
-rw-r--r--  net/mac80211/mesh.c | 6
-rw-r--r--  net/mac80211/util.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 13
-rw-r--r--  net/packet/af_packet.c | 41
-rw-r--r--  net/qrtr/qrtr.c | 3
-rw-r--r--  net/rds/ib_frmr.c | 1
-rw-r--r--  net/unix/af_unix.c | 10
-rw-r--r--  net/vmw_vsock/virtio_transport_common.c | 7
-rw-r--r--  net/wireless/util.c | 2
-rw-r--r--  sound/pci/hda/patch_realtek.c | 2
-rw-r--r--  sound/soc/codecs/cs4265.c | 2
-rw-r--r--  sound/soc/codecs/max98090.c | 16
-rw-r--r--  sound/soc/soc-pcm.c | 3
-rw-r--r--  sound/usb/format.c | 46
-rw-r--r--  sound/usb/helper.c | 17
-rw-r--r--  sound/usb/helper.h | 1
-rw-r--r--  sound/usb/pcm.c | 4
-rw-r--r--  sound/usb/quirks-table.h | 151
-rw-r--r--  sound/usb/quirks.c | 137
-rw-r--r--  tools/Makefile | 13
-rw-r--r--  tools/power/x86/intel-speed-select/Build | 1
-rw-r--r--  tools/power/x86/intel-speed-select/Makefile | 56
-rw-r--r--  tools/power/x86/intel-speed-select/isst-config.c | 1607
-rw-r--r--  tools/power/x86/intel-speed-select/isst-core.c | 721
-rw-r--r--  tools/power/x86/intel-speed-select/isst-display.c | 479
-rw-r--r--  tools/power/x86/intel-speed-select/isst.h | 231

218 files changed, 7013 insertions(+), 1047 deletions(-)
diff --git a/Makefile b/Makefile
index 2737d61afbae..7bee6dbd03cf 100644
--- a/Makefile
+++ b/Makefile
@@ -815,6 +815,10 @@ KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,)
KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
endif
+ifdef CONFIG_LIVEPATCH
+KBUILD_CFLAGS += $(call cc-option, -flive-patching=inline-clone)
+endif
+
# arch Makefile may override CC so keep this after arch Makefile is included
NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
CHECKFLAGS += $(NOSTDINC_FLAGS)
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h
index 9251e13e144f..dbbdf1d19ee6 100644
--- a/arch/alpha/include/asm/topology.h
+++ b/arch/alpha/include/asm/topology.h
@@ -3,6 +3,7 @@
#include <linux/smp.h>
#include <linux/threads.h>
+#include <linux/numa.h>
#include <asm/machvec.h>
#ifdef CONFIG_NUMA
@@ -28,7 +29,7 @@ static const struct cpumask *cpumask_of_node(int node)
{
int cpu;
- if (node == -1)
+ if (node == NUMA_NO_NODE)
return cpu_all_mask;
cpumask_clear(&node_to_cpumask_map[node]);
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index efbeb3e0dcfb..70568e6db77b 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -54,7 +54,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
- bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
+ bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
if (!may_use_simd())
return crypto_sha1_finup(desc, data, len, out);
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index fd1ff2b13dfa..af8472aded42 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -59,7 +59,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
- bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
+ bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
if (!may_use_simd()) {
if (len)
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index 92c376279c6d..1315da6c7aeb 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -74,7 +74,7 @@ void __init build_cpu_to_node_map(void)
cpumask_clear(&node_to_cpu_mask[node]);
for_each_possible_early_cpu(cpu) {
- node = -1;
+ node = NUMA_NO_NODE;
for (i = 0; i < NR_CPUS; ++i)
if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
node = node_cpuid[i].nid;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 38627af8fc39..e843efe3a92a 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -231,7 +231,7 @@ void __init setup_per_cpu_areas(void)
* CPUs are put into groups according to node. Walk cpu_map
* and create new groups at node boundaries.
*/
- prev_node = -1;
+ prev_node = NUMA_NO_NODE;
ai->nr_groups = 0;
for (unit = 0; unit < nr_units; unit++) {
cpu = cpu_map[unit];
@@ -476,7 +476,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
void *ptr = NULL;
u8 best = 0xff;
- int bestnode = -1, node, anynode = 0;
+ int bestnode = NUMA_NO_NODE, node, anynode = 0;
for_each_online_node(node) {
if (node_isset(node, memory_less_mask))
@@ -488,7 +488,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
anynode = node;
}
- if (bestnode == -1)
+ if (bestnode == NUMA_NO_NODE)
bestnode = anynode;
ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
diff --git a/arch/powerpc/crypto/crc-vpmsum_test.c b/arch/powerpc/crypto/crc-vpmsum_test.c
index 0153a9c6f4af..98ea4f4d3dde 100644
--- a/arch/powerpc/crypto/crc-vpmsum_test.c
+++ b/arch/powerpc/crypto/crc-vpmsum_test.c
@@ -78,16 +78,12 @@ static int __init crc_test_init(void)
pr_info("crc-vpmsum_test begins, %lu iterations\n", iterations);
for (i=0; i<iterations; i++) {
- size_t len, offset;
+ size_t offset = prandom_u32_max(16);
+ size_t len = prandom_u32_max(MAX_CRC_LENGTH);
- get_random_bytes(data, MAX_CRC_LENGTH);
- get_random_bytes(&len, sizeof(len));
- get_random_bytes(&offset, sizeof(offset));
-
- len %= MAX_CRC_LENGTH;
- offset &= 15;
if (len <= offset)
continue;
+ prandom_bytes(data, len);
len -= offset;
crypto_shash_update(crct10dif_shash, data+offset, len);
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index a0e3bb565e15..b6645cf4675d 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -300,9 +300,13 @@
#define H_SCM_UNBIND_MEM 0x3F0
#define H_SCM_QUERY_BLOCK_MEM_BINDING 0x3F4
#define H_SCM_QUERY_LOGICAL_MEM_BINDING 0x3F8
-#define H_SCM_MEM_QUERY 0x3FC
-#define H_SCM_BLOCK_CLEAR 0x400
-#define MAX_HCALL_OPCODE H_SCM_BLOCK_CLEAR
+#define H_SCM_UNBIND_ALL 0x3FC
+#define H_SCM_HEALTH 0x400
+#define MAX_HCALL_OPCODE H_SCM_HEALTH
+
+/* Scope args for H_SCM_UNBIND_ALL */
+#define H_UNBIND_SCOPE_ALL (0x1)
+#define H_UNBIND_SCOPE_DRC (0x2)
/* H_VIOCTL functions */
#define H_GET_VIOA_DUMP_SIZE 0x01
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index fd579c9b464a..f149ccc1818f 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/ioport.h>
+#include <linux/numa.h>
struct device_node;
@@ -266,7 +267,7 @@ extern int pcibios_map_io_space(struct pci_bus *bus);
#ifdef CONFIG_NUMA
#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = (NODE))
#else
-#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = -1)
+#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = NUMA_NO_NODE)
#endif
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 9fa263ad7cb3..b4e538f3e6fd 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -72,6 +72,21 @@ unsigned long vmalloc_to_phys(void *vmalloc_addr);
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
+
+#ifdef CONFIG_PPC64
+#define is_ioremap_addr is_ioremap_addr
+static inline bool is_ioremap_addr(const void *x)
+{
+#ifdef CONFIG_MMU
+ unsigned long addr = (unsigned long)x;
+
+ return addr >= IOREMAP_BASE && addr < IOREMAP_END;
+#else
+ return false;
+#endif
+}
+#endif /* CONFIG_PPC64 */
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index efa54926428e..b23fd2bcb1d1 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1705,7 +1705,7 @@ handle_page_fault:
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_page_fault
cmpdi r3,0
- beq+ 12f
+ beq+ ret_from_except_lite
bl save_nvgprs
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -1720,7 +1720,12 @@ handle_dabr_fault:
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_break
-12: b ret_from_except_lite
+ /*
+ * do_break() may have changed the NV GPRS while handling a breakpoint.
+ * If so, we need to restore them with their updated values. Don't use
+ * ret_from_except_lite here.
+ */
+ b ret_from_except
#ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index ffc04c7af6e8..f611575d339f 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -32,6 +32,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
+#include <linux/numa.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -132,7 +133,7 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
int nid = of_node_to_nid(dev);
if (nid < 0 || !node_online(nid))
- nid = -1;
+ nid = NUMA_NO_NODE;
PHB_SET_NODE(phb, nid);
}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 565bf078c92e..1ce9a1fea5e2 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -216,9 +216,9 @@ static void initialize_distance_lookup_table(int nid,
*/
static int associativity_to_nid(const __be32 *associativity)
{
- int nid = -1;
+ int nid = NUMA_NO_NODE;
- if (min_common_depth == -1)
+ if (!numa_enabled)
goto out;
if (of_read_number(associativity, 1) >= min_common_depth)
@@ -226,7 +226,7 @@ static int associativity_to_nid(const __be32 *associativity)
/* POWER4 LPAR uses 0xffff as invalid node */
if (nid == 0xffff || nid >= MAX_NUMNODES)
- nid = -1;
+ nid = NUMA_NO_NODE;
if (nid > 0 &&
of_read_number(associativity, 1) >= distance_ref_points_depth) {
@@ -245,7 +245,7 @@ out:
*/
static int of_node_to_nid_single(struct device_node *device)
{
- int nid = -1;
+ int nid = NUMA_NO_NODE;
const __be32 *tmp;
tmp = of_get_associativity(device);
@@ -257,7 +257,7 @@ static int of_node_to_nid_single(struct device_node *device)
/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
- int nid = -1;
+ int nid = NUMA_NO_NODE;
of_node_get(device);
while (device) {
@@ -422,17 +422,19 @@ static int of_get_assoc_arrays(struct assoc_arrays *aa)
static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
struct assoc_arrays aa = { .arrays = NULL };
- int default_nid = 0;
+ int default_nid = NUMA_NO_NODE;
int nid = default_nid;
int rc, index;
+ if ((min_common_depth < 0) || !numa_enabled)
+ return default_nid;
+
rc = of_get_assoc_arrays(&aa);
if (rc)
return default_nid;
- if (min_common_depth > 0 && min_common_depth <= aa.array_sz &&
- !(lmb->flags & DRCONF_MEM_AI_INVALID) &&
- lmb->aa_index < aa.n_arrays) {
+ if (min_common_depth <= aa.array_sz &&
+ !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
nid = of_read_number(&aa.arrays[index], 1);
@@ -455,7 +457,7 @@ static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
*/
static int numa_setup_cpu(unsigned long lcpu)
{
- int nid = -1;
+ int nid = NUMA_NO_NODE;
struct device_node *cpu;
/*
@@ -632,8 +634,14 @@ static int __init parse_numa_properties(void)
min_common_depth = find_min_common_depth();
- if (min_common_depth < 0)
+ if (min_common_depth < 0) {
+ /*
+ * if we fail to parse min_common_depth from device tree
+ * mark the numa disabled, boot with numa disabled.
+ */
+ numa_enabled = false;
return min_common_depth;
+ }
dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
@@ -749,7 +757,7 @@ void __init dump_numa_cpu_topology(void)
unsigned int node;
unsigned int cpu, count;
- if (min_common_depth == -1 || !numa_enabled)
+ if (!numa_enabled)
return;
for_each_online_node(node) {
@@ -810,7 +818,7 @@ static void __init find_possible_nodes(void)
struct device_node *rtas;
u32 numnodes, i;
- if (min_common_depth <= 0)
+ if (!numa_enabled)
return;
rtas = of_find_node_by_path("/rtas");
@@ -931,7 +939,7 @@ static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
{
struct drmem_lmb *lmb;
unsigned long lmb_size;
- int nid = -1;
+ int nid = NUMA_NO_NODE;
lmb_size = drmem_lmb_size();
@@ -961,7 +969,7 @@ static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
struct device_node *memory;
- int nid = -1;
+ int nid = NUMA_NO_NODE;
for_each_node_by_type(memory, "memory") {
unsigned long start, size;
@@ -1006,7 +1014,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
struct device_node *memory = NULL;
int nid;
- if (!numa_enabled || (min_common_depth < 0))
+ if (!numa_enabled)
return first_online_node;
memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index bba281b1fe1b..32f1e3a33650 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/libnvdimm.h>
#include <linux/platform_device.h>
+#include <linux/delay.h>
#include <asm/plpar_wrappers.h>
@@ -42,8 +43,9 @@ struct papr_scm_priv {
static int drc_pmem_bind(struct papr_scm_priv *p)
{
unsigned long ret[PLPAR_HCALL_BUFSIZE];
- uint64_t rc, token;
uint64_t saved = 0;
+ uint64_t token;
+ int64_t rc;
/*
* When the hypervisor cannot map all the requested memory in a single
@@ -63,6 +65,10 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
} while (rc == H_BUSY);
if (rc) {
+ /* H_OVERLAP needs a separate error path */
+ if (rc == H_OVERLAP)
+ return -EBUSY;
+
dev_err(&p->pdev->dev, "bind err: %lld\n", rc);
return -ENXIO;
}
@@ -77,22 +83,36 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
static int drc_pmem_unbind(struct papr_scm_priv *p)
{
unsigned long ret[PLPAR_HCALL_BUFSIZE];
- uint64_t rc, token;
+ uint64_t token = 0;
+ int64_t rc;
- token = 0;
+ dev_dbg(&p->pdev->dev, "unbind drc %x\n", p->drc_index);
- /* NB: unbind has the same retry requirements mentioned above */
+ /* NB: unbind has the same retry requirements as drc_pmem_bind() */
do {
- rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index,
- p->bound_addr, p->blocks, token);
+
+ /* Unbind of all SCM resources associated with drcIndex */
+ rc = plpar_hcall(H_SCM_UNBIND_ALL, ret, H_UNBIND_SCOPE_DRC,
+ p->drc_index, token);
token = ret[0];
- cond_resched();
+
+ /* Check if we are stalled for some time */
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ } else if (rc == H_BUSY) {
+ cond_resched();
+ }
+
} while (rc == H_BUSY);
if (rc)
dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);
+ else
+ dev_dbg(&p->pdev->dev, "unbind drc %x complete\n",
+ p->drc_index);
- return !!rc;
+ return rc == H_SUCCESS ? 0 : -ENXIO;
}
static int papr_scm_meta_get(struct papr_scm_priv *p,
@@ -315,6 +335,14 @@ static int papr_scm_probe(struct platform_device *pdev)
/* request the hypervisor to bind this region to somewhere in memory */
rc = drc_pmem_bind(p);
+
+ /* If phyp says drc memory still bound then force unbound and retry */
+ if (rc == -EBUSY) {
+ dev_warn(&pdev->dev, "Retrying bind after unbinding\n");
+ drc_pmem_unbind(p);
+ rc = drc_pmem_bind(p);
+ }
+
if (rc)
goto err;
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 256695cedb45..11e743357bcc 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -39,7 +39,7 @@
*/
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 4096
-#define KVM_HALT_POLL_NS_DEFAULT 80000
+#define KVM_HALT_POLL_NS_DEFAULT 50000
/* s390-specific vcpu->requests bit members */
#define KVM_REQ_ENABLE_IBS KVM_ARCH_REQ(0)
@@ -307,6 +307,9 @@ struct kvm_vcpu_stat {
u64 halt_successful_poll;
u64 halt_attempted_poll;
u64 halt_poll_invalid;
+#ifndef __GENKSYMS__
+ u64 halt_no_poll_steal;
+#endif
u64 halt_wakeup;
u64 instruction_lctl;
u64 instruction_lctlg;
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 4b028396e539..069c13c4e9ef 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -98,7 +98,11 @@ struct lowcore {
/* Current process. */
__u64 current_task; /* 0x0328 */
- __u8 pad_0x318[0x320-0x318]; /* 0x0330 */
+#ifndef __GENKSYMS__
+ __u64 avg_steal_timer; /* 0x0330 */
+#else
+ __u8 pad_0x318[0x320-0x318]; /* 0x0330 */
+#endif
__u64 kernel_stack; /* 0x0338 */
/* Interrupt, panic and restart stack. */
@@ -139,7 +143,7 @@ struct lowcore {
/* Per cpu primary space access list */
__u32 paste[16]; /* 0x0400 */
- __u8 pad_0x04c0[0x0500-0x0440]; /* 0x0440 */
+ __u8 pad_0x04c0[0x0500-0x0440]; /* 0x0440 */
/* br %r1 trampoline */
__u16 br_r1_trampoline; /* 0x0500 */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 07383496f1d3..0400a8dfc63d 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -274,7 +274,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
lc->percpu_offset = __per_cpu_offset[cpu];
lc->kernel_asce = S390_lowcore.kernel_asce;
lc->machine_flags = S390_lowcore.machine_flags;
- lc->user_timer = lc->system_timer = lc->steal_timer = 0;
+ lc->user_timer = lc->system_timer =
+ lc->steal_timer = lc->avg_steal_timer = 0;
__ctl_store(lc->cregs_save_area, 0, 15);
save_access_regs((unsigned int *) lc->access_regs_save_area);
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 072d84ba42a3..a6f23238c3b0 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p,
*/
static int do_account_vtime(struct task_struct *tsk)
{
- u64 timer, clock, user, guest, system, hardirq, softirq, steal;
+ u64 timer, clock, user, guest, system, hardirq, softirq;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
@@ -185,12 +185,6 @@ static int do_account_vtime(struct task_struct *tsk)
account_system_index_scaled(tsk, softirq, scale_vtime(softirq),
CPUTIME_SOFTIRQ);
- steal = S390_lowcore.steal_timer;
- if ((s64) steal > 0) {
- S390_lowcore.steal_timer = 0;
- account_steal_time(cputime_to_nsecs(steal));
- }
-
return virt_timer_forward(user + guest + system + hardirq + softirq);
}
@@ -216,8 +210,19 @@ void vtime_task_switch(struct task_struct *prev)
*/
void vtime_flush(struct task_struct *tsk)
{
+ u64 steal, avg_steal;
+
if (do_account_vtime(tsk))
virt_timer_expire();
+
+ steal = S390_lowcore.steal_timer;
+ avg_steal = S390_lowcore.avg_steal_timer / 2;
+ if ((s64) steal > 0) {
+ S390_lowcore.steal_timer = 0;
+ account_steal_time(steal);
+ avg_steal += steal;
+ }
+ S390_lowcore.avg_steal_timer = avg_steal;
}
/*
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index b1900239b0ab..b538ca407b9a 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -29,6 +29,7 @@ config KVM
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_INVALID_WAKEUPS
+ select HAVE_KVM_NO_POLL
select SRCU
select KVM_VFIO
---help---
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 7fc5bdf85a57..c21aed7f3c42 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -74,6 +74,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
+ { "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
@@ -165,6 +166,11 @@ MODULE_PARM_DESC(hpage, "1m huge page backing support");
*/
#define SIZE_INTERNAL 16
+/* maximum percentage of steal time for polling. >100 is treated like 100 */
+static u8 halt_poll_max_steal = 10;
+module_param(halt_poll_max_steal, byte, 0644);
+MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
+
/*
* Base feature mask that defines default mask for facilities. Consists of the
* defines in FACILITIES_KVM and the non-hypervisor managed bits.
@@ -2780,6 +2786,17 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
}
}
+bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+{
+ /* do not poll with more than halt_poll_max_steal percent of steal time */
+ if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
+ halt_poll_max_steal) {
+ vcpu->stat.halt_no_poll_steal++;
+ return true;
+ }
+ return false;
+}
+
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
/* kvm common code refers to this, but never calls it */
diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c
index 11a1f0d289d2..3de217b5c3e2 100644
--- a/arch/sparc/kernel/pci_fire.c
+++ b/arch/sparc/kernel/pci_fire.c
@@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/of_device.h>
+#include <linux/numa.h>
#include <asm/prom.h>
#include <asm/irq.h>
@@ -415,7 +416,7 @@ static int pci_fire_pbm_init(struct pci_pbm_info *pbm,
struct device_node *dp = op->dev.of_node;
int err;
- pbm->numa_node = -1;
+ pbm->numa_node = NUMA_NO_NODE;
pbm->pci_ops = &sun4u_pci_ops;
pbm->config_space_reg_bits = 12;
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index c664d3e3aa8d..3b722a8d2d63 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
+#include <linux/numa.h>
#include <asm/iommu.h>
#include <asm/irq.h>
@@ -1346,7 +1347,7 @@ static int schizo_pbm_init(struct pci_pbm_info *pbm,
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
- pbm->numa_node = -1;
+ pbm->numa_node = NUMA_NO_NODE;
pbm->pci_ops = &sun4u_pci_ops;
pbm->config_space_reg_bits = 8;
diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c
index 8db48e808ed4..a068464177e7 100644
--- a/arch/sparc/kernel/psycho_common.c
+++ b/arch/sparc/kernel/psycho_common.c
@@ -4,6 +4,7 @@
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/numa.h>
#include <asm/upa.h>
@@ -453,7 +454,7 @@ void psycho_pbm_init_common(struct pci_pbm_info *pbm, struct platform_device *op
struct device_node *dp = op->dev.of_node;
pbm->name = dp->full_name;
- pbm->numa_node = -1;
+ pbm->numa_node = NUMA_NO_NODE;
pbm->chip_type = chip_type;
pbm->chip_version = of_getintprop_default(dp, "version#", 0);
pbm->chip_revision = of_getintprop_default(dp, "module-revision#", 0);
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
index be5bdf93c767..41eaed068e5f 100644
--- a/arch/sparc/kernel/sbus.c
+++ b/arch/sparc/kernel/sbus.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/numa.h>
#include <asm/page.h>
#include <asm/io.h>
@@ -560,7 +561,7 @@ static void __init sbus_iommu_init(struct platform_device *op)
op->dev.archdata.iommu = iommu;
op->dev.archdata.stc = strbuf;
- op->dev.archdata.numa_node = -1;
+ op->dev.archdata.numa_node = NUMA_NO_NODE;
reg_base = regs + SYSIO_IOMMUREG_BASE;
iommu->iommu_control = reg_base + IOMMU_CONTROL;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 28c573b12cca..0a51dc5d0e1b 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -936,13 +936,13 @@ static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
{
int prev_nid, new_nid;
- prev_nid = -1;
+ prev_nid = NUMA_NO_NODE;
for ( ; start < end; start += PAGE_SIZE) {
for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
struct node_mem_mask *p = &node_masks[new_nid];
if ((start & p->mask) == p->match) {
- if (prev_nid == -1)
+ if (prev_nid == NUMA_NO_NODE)
prev_nid = new_nid;
break;
}
@@ -1167,7 +1167,7 @@ int of_node_to_nid(struct device_node *dp)
md = mdesc_grab();
count = 0;
- nid = -1;
+ nid = NUMA_NO_NODE;
mdesc_for_each_node_by_name(md, grp, "group") {
if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
nid = count;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b43d2c781d2f..16a07b6b32ee 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -114,7 +114,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
}
#define KVM_PERMILLE_MMU_PAGES 20
-#define KVM_MIN_ALLOC_MMU_PAGES 64
+#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
@@ -789,9 +789,15 @@ struct kvm_sev_info {
};
struct kvm_arch {
- unsigned int n_used_mmu_pages;
- unsigned int n_requested_mmu_pages;
- unsigned int n_max_mmu_pages;
+#ifndef __GENKSYMS__
+ unsigned long n_used_mmu_pages;
+ unsigned long n_requested_mmu_pages;
+ unsigned long n_max_mmu_pages;
+#else
+ unsigned int n_used_mmu_pages;
+ unsigned int n_requested_mmu_pages;
+ unsigned int n_max_mmu_pages;
+#endif
unsigned int indirect_shadow_pages;
unsigned long mmu_valid_gen;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
@@ -1174,8 +1180,8 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+unsigned long kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index f513cc231151..4bb56ad26da5 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -6,6 +6,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
+#include <linux/numa.h>
#include <asm/io.h>
#include <asm/pat.h>
#include <asm/x86_init.h>
@@ -151,7 +152,7 @@ cpumask_of_pcibus(const struct pci_bus *bus)
int node;
node = __pcibus_to_node(bus);
- return (node == -1) ? cpu_online_mask :
+ return (node == NUMA_NO_NODE) ? cpu_online_mask :
cpumask_of_node(node);
}
#endif
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index b6ecac7671ee..08ede5b768e0 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -27,6 +27,7 @@
#include <linux/crash_dump.h>
#include <linux/reboot.h>
#include <linux/memory.h>
+#include <linux/numa.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
@@ -1411,7 +1412,7 @@ static void __init build_socket_tables(void)
}
/* Set socket -> node values: */
- lnid = -1;
+ lnid = NUMA_NO_NODE;
for_each_present_cpu(cpu) {
int nid = cpu_to_node(cpu);
int apicid, sockid;
@@ -1542,7 +1543,7 @@ static void __init uv_system_init_hub(void)
new_hub->pnode = 0xffff;
new_hub->numa_blade_id = uv_node_to_blade_id(nodeid);
- new_hub->memory_nid = -1;
+ new_hub->memory_nid = NUMA_NO_NODE;
new_hub->nr_possible_cpus = 0;
new_hub->nr_online_cpus = 0;
}
@@ -1559,7 +1560,7 @@ static void __init uv_system_init_hub(void)
uv_cpu_info_per(cpu)->p_uv_hub_info = uv_hub_info_list(nodeid);
uv_cpu_info_per(cpu)->blade_cpu_id = uv_cpu_hub_info(cpu)->nr_possible_cpus++;
- if (uv_cpu_hub_info(cpu)->memory_nid == -1)
+ if (uv_cpu_hub_info(cpu)->memory_nid == NUMA_NO_NODE)
uv_cpu_hub_info(cpu)->memory_nid = cpu_to_node(cpu);
/* Init memoryless node: */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ac8c0a5a9407..1b3e5fc33157 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -56,6 +56,7 @@
#include <linux/stackprotector.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>
+#include <linux/numa.h>
#include <asm/acpi.h>
#include <asm/desc.h>
@@ -904,7 +905,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
/* reduce the number of lines printed when booting a large cpu count system */
static void announce_cpu(int cpu, int apicid)
{
- static int current_node = -1;
+ static int current_node = NUMA_NO_NODE;
int node = early_cpu_to_node(cpu);
static int width, node_width;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 64bcf60e9393..42cca05f05dc 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1941,7 +1941,7 @@ static int is_empty_shadow_page(u64 *spt)
* aggregate version in order to make the slab shrinker
* faster
*/
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
{
kvm->arch.n_used_mmu_pages += nr;
percpu_counter_add(&kvm_total_used_mmu_pages, nr);
@@ -2681,7 +2681,7 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
* Changing the number of mmu pages allocated to the vm
* Note: if goal_nr_mmu_pages is too small, you will get dead lock
*/
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
{
LIST_HEAD(invalid_list);
@@ -5592,10 +5592,10 @@ out:
/*
* Caculate mmu pages needed for kvm.
*/
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+unsigned long kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
- unsigned int nr_mmu_pages;
- unsigned int nr_pages = 0;
+ unsigned long nr_mmu_pages;
+ unsigned long nr_pages = 0;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int i;
@@ -5608,8 +5608,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
}
nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
- nr_mmu_pages = max(nr_mmu_pages,
- (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+ nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
return nr_mmu_pages;
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 1092302aa16a..de14d69901cc 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -67,7 +67,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
u64 fault_address, char *insn, int insn_len);
-static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{
if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
return kvm->arch.n_max_mmu_pages -
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index da04616e2a9d..626cffd9d96d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5076,10 +5076,13 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
kvm_lapic_set_irr(vec, vcpu->arch.apic);
smp_mb__after_atomic();
- if (avic_vcpu_is_running(vcpu))
- wrmsrl(SVM_AVIC_DOORBELL,
- kvm_cpu_get_apicid(vcpu->cpu));
- else
+ if (avic_vcpu_is_running(vcpu)) {
+ int cpuid = vcpu->cpu;
+
+ if (cpuid != get_cpu())
+ wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpuid));
+ put_cpu();
+ } else
kvm_vcpu_wake_up(vcpu);
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8e6011eb5d93..574c3d3bd441 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3908,7 +3908,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
}
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
- u32 kvm_nr_mmu_pages)
+ unsigned long kvm_nr_mmu_pages)
{
if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
return -EINVAL;
@@ -3922,7 +3922,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
return 0;
}
-static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
return kvm->arch.n_max_mmu_pages;
}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index ff20cd4964a0..698ff54f655b 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -198,62 +198,29 @@ struct jit_context {
#define BPF_MAX_INSN_SIZE 128
#define BPF_INSN_SAFETY 64
-#define AUX_STACK_SPACE \
- (32 /* space for rbx, r13, r14, r15 */ + \
- 8 /* space for skb_copy_bits() buffer */)
-
-#define PROLOGUE_SIZE 37
+#define PROLOGUE_SIZE 20
/* emit x64 prologue code for BPF program and check it's size.
* bpf_tail_call helper will skip it while jumping into another program
*/
-static void emit_prologue(u8 **pprog, u32 stack_depth)
+static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
{
u8 *prog = *pprog;
int cnt = 0;
- EMIT1(0x55); /* push rbp */
- EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
-
- /* sub rsp, rounded_stack_depth + AUX_STACK_SPACE */
- EMIT3_off32(0x48, 0x81, 0xEC,
- round_up(stack_depth, 8) + AUX_STACK_SPACE);
-
- /* sub rbp, AUX_STACK_SPACE */
- EMIT4(0x48, 0x83, 0xED, AUX_STACK_SPACE);
-
- /* all classic BPF filters use R6(rbx) save it */
-
- /* mov qword ptr [rbp+0],rbx */
- EMIT4(0x48, 0x89, 0x5D, 0);
-
- /* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
- * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
- * R8(r14). R9(r15) spill could be made conditional, but there is only
- * one 'bpf_error' return path out of helper functions inside bpf_jit.S
- * The overhead of extra spill is negligible for any filter other
- * than synthetic ones. Therefore not worth adding complexity.
- */
-
- /* mov qword ptr [rbp+8],r13 */
- EMIT4(0x4C, 0x89, 0x6D, 8);
- /* mov qword ptr [rbp+16],r14 */
- EMIT4(0x4C, 0x89, 0x75, 16);
- /* mov qword ptr [rbp+24],r15 */
- EMIT4(0x4C, 0x89, 0x7D, 24);
-
- /* Clear the tail call counter (tail_call_cnt): for eBPF tail calls
- * we need to reset the counter to 0. It's done in two instructions,
- * resetting rax register to 0 (xor on eax gets 0 extended), and
- * moving it to the counter location.
- */
-
- /* xor eax, eax */
- EMIT2(0x31, 0xc0);
- /* mov qword ptr [rbp+32], rax */
- EMIT4(0x48, 0x89, 0x45, 32);
-
- BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
+ EMIT1(0x55); /* push rbp */
+ EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
+ /* sub rsp, rounded_stack_depth */
+ EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
+ EMIT1(0x53); /* push rbx */
+ EMIT2(0x41, 0x55); /* push r13 */
+ EMIT2(0x41, 0x56); /* push r14 */
+ EMIT2(0x41, 0x57); /* push r15 */
+ if (!ebpf_from_cbpf) {
+ /* zero init tail_call_cnt */
+ EMIT2(0x6a, 0x00);
+ BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
+ }
*pprog = prog;
}
@@ -293,13 +260,13 @@ static void emit_bpf_tail_call(u8 **pprog)
/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
*/
- EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */
+ EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
EMIT2(X86_JA, OFFSET2); /* ja out */
label2 = cnt;
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
- EMIT2_off32(0x89, 0x85, 36); /* mov dword ptr [rbp + 36], eax */
+ EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */
/* prog = array->ptrs[index]; */
EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
@@ -364,7 +331,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
int proglen = 0;
u8 *prog = temp;
- emit_prologue(&prog, bpf_prog->aux->stack_depth);
+ emit_prologue(&prog, bpf_prog->aux->stack_depth,
+ bpf_prog_was_classic(bpf_prog));
if (seen_ld_abs)
emit_load_skb_data_hlen(&prog);
@@ -1073,19 +1041,14 @@ common_load:
seen_exit = true;
/* update cleanup_addr */
ctx->cleanup_addr = proglen;
- /* mov rbx, qword ptr [rbp+0] */
- EMIT4(0x48, 0x8B, 0x5D, 0);
- /* mov r13, qword ptr [rbp+8] */
- EMIT4(0x4C, 0x8B, 0x6D, 8);
- /* mov r14, qword ptr [rbp+16] */
- EMIT4(0x4C, 0x8B, 0x75, 16);
- /* mov r15, qword ptr [rbp+24] */
- EMIT4(0x4C, 0x8B, 0x7D, 24);
-
- /* add rbp, AUX_STACK_SPACE */
- EMIT4(0x48, 0x83, 0xC5, AUX_STACK_SPACE);
- EMIT1(0xC9); /* leave */
- EMIT1(0xC3); /* ret */
+ if (!bpf_prog_was_classic(bpf_prog))
+ EMIT1(0x5B); /* get rid of tail_call_cnt */
+ EMIT2(0x41, 0x5F); /* pop r15 */
+ EMIT2(0x41, 0x5E); /* pop r14 */
+ EMIT2(0x41, 0x5D); /* pop r13 */
+ EMIT1(0x5B); /* pop rbx */
+ EMIT1(0xC9); /* leave */
+ EMIT1(0xC3); /* ret */
break;
default:
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 4d6f51bcdfab..af8afe5c06ea 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -67,6 +67,8 @@ struct chachapoly_req_ctx {
unsigned int cryptlen;
/* Actual AD, excluding IV */
unsigned int assoclen;
+ /* request flags, with MAY_SLEEP cleared if needed */
+ u32 flags;
union {
struct poly_req poly;
struct chacha_req chacha;
@@ -76,8 +78,12 @@ struct chachapoly_req_ctx {
static inline void async_done_continue(struct aead_request *req, int err,
int (*cont)(struct aead_request *))
{
- if (!err)
+ if (!err) {
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+
+ rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
err = cont(req);
+ }
if (err != -EINPROGRESS && err != -EBUSY)
aead_request_complete(req, err);
@@ -144,7 +150,7 @@ static int chacha_decrypt(struct aead_request *req)
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
}
- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ skcipher_request_set_callback(&creq->req, rctx->flags,
chacha_decrypt_done, req);
skcipher_request_set_tfm(&creq->req, ctx->chacha);
skcipher_request_set_crypt(&creq->req, src, dst,
@@ -188,7 +194,7 @@ static int poly_tail(struct aead_request *req)
memcpy(&preq->tail.cryptlen, &len, sizeof(len));
sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail));
- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_tail_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src,
@@ -219,7 +225,7 @@ static int poly_cipherpad(struct aead_request *req)
sg_init_table(preq->src, 1);
sg_set_buf(preq->src, &preq->pad, padlen);
- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_cipherpad_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
@@ -250,7 +256,7 @@ static int poly_cipher(struct aead_request *req)
sg_init_table(rctx->src, 2);
crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_cipher_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
@@ -280,7 +286,7 @@ static int poly_adpad(struct aead_request *req)
sg_init_table(preq->src, 1);
sg_set_buf(preq->src, preq->pad, padlen);
- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_adpad_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
@@ -304,7 +310,7 @@ static int poly_ad(struct aead_request *req)
struct poly_req *preq = &rctx->u.poly;
int err;
- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_ad_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
@@ -331,7 +337,7 @@ static int poly_setkey(struct aead_request *req)
sg_init_table(preq->src, 1);
sg_set_buf(preq->src, rctx->key, sizeof(rctx->key));
- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_setkey_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
@@ -355,7 +361,7 @@ static int poly_init(struct aead_request *req)
struct poly_req *preq = &rctx->u.poly;
int err;
- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_init_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
@@ -393,7 +399,7 @@ static int poly_genkey(struct aead_request *req)
chacha_iv(creq->iv, req, 0);
- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ skcipher_request_set_callback(&creq->req, rctx->flags,
poly_genkey_done, req);
skcipher_request_set_tfm(&creq->req, ctx->chacha);
skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
@@ -433,7 +439,7 @@ static int chacha_encrypt(struct aead_request *req)
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
}
- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ skcipher_request_set_callback(&creq->req, rctx->flags,
chacha_encrypt_done, req);
skcipher_request_set_tfm(&creq->req, ctx->chacha);
skcipher_request_set_crypt(&creq->req, src, dst,
@@ -451,6 +457,7 @@ static int chachapoly_encrypt(struct aead_request *req)
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
rctx->cryptlen = req->cryptlen;
+ rctx->flags = aead_request_flags(req);
/* encrypt call chain:
* - chacha_encrypt/done()
@@ -472,6 +479,7 @@ static int chachapoly_decrypt(struct aead_request *req)
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
+ rctx->flags = aead_request_flags(req);
/* decrypt call chain:
* - poly_genkey/done()
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 12ad3e3a84e3..73b56f2f44f1 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -34,6 +34,7 @@ static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+ be128 k;
if (keylen != GHASH_BLOCK_SIZE) {
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -42,7 +43,12 @@ static int ghash_setkey(struct crypto_shash *tfm,
if (ctx->gf128)
gf128mul_free_4k(ctx->gf128);
- ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
+
+ BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE);
+ memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */
+ ctx->gf128 = gf128mul_init_4k_lle(&k);
+ memzero_explicit(&k, GHASH_BLOCK_SIZE);
+
if (!ctx->gf128)
return -ENOMEM;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index ac3b19ad8544..1687dd70d748 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -40,6 +40,7 @@
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/prefetch.h>
+#include <linux/numa.h>
#include "mtip32xx.h"
#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -4105,9 +4106,9 @@ static int get_least_used_cpu_on_node(int node)
/* Helper for selecting a node in round robin mode */
static inline int mtip_get_next_rr_node(void)
{
- static int next_node = -1;
+ static int next_node = NUMA_NO_NODE;
- if (next_node == -1) {
+ if (next_node == NUMA_NO_NODE) {
next_node = first_online_node;
return next_node;
}
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 4796aa11ccc5..c98c22ab4529 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -914,8 +914,10 @@ static void __init acpi_cpufreq_boost_init(void)
{
int ret;
- if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)))
+ if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
+ pr_debug("Boost capabilities not present in the processor\n");
return;
+ }
acpi_cpufreq_driver.set_boost = set_boost;
acpi_cpufreq_driver.boost_enabled = boost_state(0);
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 7281a2c19c36..39c462711eae 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -468,12 +468,12 @@ static int brcm_avs_set_pstate(struct private_data *priv, unsigned int pstate)
return __issue_avs_command(priv, AVS_CMD_SET_PSTATE, true, args);
}
-static unsigned long brcm_avs_get_voltage(void __iomem *base)
+static u32 brcm_avs_get_voltage(void __iomem *base)
{
return readl(base + AVS_MBOX_VOLTAGE1);
}
-static unsigned long brcm_avs_get_frequency(void __iomem *base)
+static u32 brcm_avs_get_frequency(void __iomem *base)
{
return readl(base + AVS_MBOX_FREQUENCY) * 1000; /* in kHz */
}
@@ -762,8 +762,8 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
rc = brcm_avs_get_pmap(priv, NULL);
magic = readl(priv->base + AVS_MBOX_MAGIC);
- return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP) &&
- (rc != -EINVAL);
+ return (magic == AVS_FIRMWARE_MAGIC) && ((rc != -ENOTSUPP) ||
+ (rc != -EINVAL));
}
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
@@ -973,14 +973,14 @@ static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)
{
struct private_data *priv = policy->driver_data;
- return sprintf(buf, "0x%08lx\n", brcm_avs_get_voltage(priv->base));
+ return sprintf(buf, "0x%08x\n", brcm_avs_get_voltage(priv->base));
}
static ssize_t show_brcm_avs_frequency(struct cpufreq_policy *policy, char *buf)
{
struct private_data *priv = policy->driver_data;
- return sprintf(buf, "0x%08lx\n", brcm_avs_get_frequency(priv->base));
+ return sprintf(buf, "0x%08x\n", brcm_avs_get_frequency(priv->base));
}
cpufreq_freq_attr_ro(brcm_avs_pstate);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c8c70214e891..cc159a594936 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -554,13 +554,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
* SYSFS INTERFACE *
*********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int ret, enable;
@@ -1528,17 +1528,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
unsigned int ret_freq = 0;
- if (!cpufreq_driver->get)
+ if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
return ret_freq;
ret_freq = cpufreq_driver->get(policy->cpu);
/*
- * Updating inactive policies is invalid, so avoid doing that. Also
- * if fast frequency switching is used with the given policy, the check
+ * If fast frequency switching is used with the given policy, the check
* against policy->cur is pointless, so skip it in that case too.
*/
- if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
+ if (policy->fast_switch_enabled)
return ret_freq;
if (ret_freq && policy->cur &&
@@ -1567,10 +1566,7 @@ unsigned int cpufreq_get(unsigned int cpu)
if (policy) {
down_read(&policy->rwsem);
-
- if (!policy_is_inactive(policy))
- ret_freq = __cpufreq_get(policy);
-
+ ret_freq = __cpufreq_get(policy);
up_read(&policy->rwsem);
cpufreq_cpu_put(policy);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 689817486ce8..08bc86edca54 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1065,7 +1065,7 @@ static void intel_pstate_debug_hide_params(void)
/************************** sysfs begin ************************/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
- (struct kobject *kobj, struct attribute *attr, char *buf) \
+ (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
{ \
return sprintf(buf, "%u\n", global.object); \
}
@@ -1074,7 +1074,7 @@ static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);
static ssize_t show_status(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
ssize_t ret;
@@ -1085,7 +1085,7 @@ static ssize_t show_status(struct kobject *kobj,
return ret;
}
-static ssize_t store_status(struct kobject *a, struct attribute *b,
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
char *p = memchr(buf, '\n', count);
@@ -1099,7 +1099,7 @@ static ssize_t store_status(struct kobject *a, struct attribute *b,
}
static ssize_t show_turbo_pct(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct cpudata *cpu;
int total, no_turbo, turbo_pct;
@@ -1125,7 +1125,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
}
static ssize_t show_num_pstates(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct cpudata *cpu;
int total;
@@ -1146,7 +1146,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
}
static ssize_t show_no_turbo(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
ssize_t ret;
@@ -1168,7 +1168,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
return ret;
}
-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -1215,7 +1215,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
return count;
}
-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -1245,7 +1245,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
return count;
}
-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -1277,12 +1277,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
}
static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", hwp_boost);
}
-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
+static ssize_t store_hwp_dynamic_boost(struct kobject *a,
+ struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index c2dd43f3f5d8..8d63a6dc8383 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
if (IS_ERR(priv.cpu_clk)) {
dev_err(priv.dev, "Unable to get cpuclk\n");
- return PTR_ERR(priv.cpu_clk);
+ err = PTR_ERR(priv.cpu_clk);
+ goto out_node;
}
err = clk_prepare_enable(priv.cpu_clk);
if (err) {
dev_err(priv.dev, "Unable to prepare cpuclk\n");
- return err;
+ goto out_node;
}
kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
@@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
goto out_ddr;
}
- of_node_put(np);
- np = NULL;
-
err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
- if (!err)
- return 0;
+ if (err) {
+ dev_err(priv.dev, "Failed to register cpufreq driver\n");
+ goto out_powersave;
+ }
- dev_err(priv.dev, "Failed to register cpufreq driver\n");
+ of_node_put(np);
+ return 0;
+out_powersave:
clk_disable_unprepare(priv.powersave_clk);
out_ddr:
clk_disable_unprepare(priv.ddr_clk);
out_cpu:
clk_disable_unprepare(priv.cpu_clk);
+out_node:
of_node_put(np);
return err;
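Annotation: the reworked probe path above follows the usual goto-ladder idiom: each acquired resource gets a label, errors jump to the label that releases everything acquired so far, and the labels run in reverse acquisition order, so the of_node_put(np) that the early returns used to skip is now reached on every path. A minimal sketch of the idiom with hypothetical helpers:

    static int example_probe(void)
    {
            int err;

            err = acquire_a();              /* hypothetical resource A */
            if (err)
                    return err;

            err = acquire_b();              /* hypothetical resource B */
            if (err)
                    goto out_a;

            return 0;

    out_a:
            release_a();                    /* unwind in reverse order */
            return err;
    }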
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 35dd4d7ffee0..58c933f48300 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -146,6 +146,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpu = of_get_cpu_node(policy->cpu, NULL);
+ of_node_put(cpu);
if (!cpu)
goto out;
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index ff44016ea031..641f8021855a 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -551,6 +551,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
+ of_node_put(volt_gpio_np);
if (!voltage_gpio){
pr_err("missing cpu-vcore-select gpio\n");
return 1;
@@ -587,6 +588,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
+ of_node_put(volt_gpio_np);
pvr = mfspr(SPRN_PVR);
has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
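Annotation: the of_node_put() additions in this and the neighbouring cpufreq patches all fix the same leak: OF lookup helpers such as of_find_node_by_name() and of_get_cpu_node() return a device node with its refcount elevated, and every lookup must be balanced by of_node_put() on all paths. A minimal sketch of the pattern (the node name mirrors the code above; use_node() is hypothetical):

    struct device_node *np;

    np = of_find_node_by_name(NULL, "cpu-vcore-select");
    if (np)
            use_node(np);           /* hypothetical consumer */
    of_node_put(np);                /* of_node_put(NULL) is a harmless no-op */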
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 5a4c5a639f61..2eaeebcc93af 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
!cbe_get_cpu_mic_tm_regs(policy->cpu)) {
pr_info("invalid CBE regs pointers for cpufreq\n");
+ of_node_put(cpu);
return -EINVAL;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 89291c15015c..3f768699332b 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AMD Cryptographic Coprocessor (CCP) AES crypto API support
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -79,8 +80,7 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
return -EINVAL;
if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
- (ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
- (ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
+ (ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
(req->nbytes & (AES_BLOCK_SIZE - 1)))
return -EINVAL;
@@ -291,7 +291,7 @@ static struct ccp_aes_def aes_algs[] = {
.version = CCP_VERSION(3, 0),
.name = "cfb(aes)",
.driver_name = "cfb-aes-ccp",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
.ivsize = AES_BLOCK_SIZE,
.alg_defaults = &ccp_aes_defaults,
},
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 6032c6a3aaa7..977eabb1573a 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -35,56 +35,62 @@ struct ccp_tasklet_data {
};
/* Human-readable error strings */
+#define CCP_MAX_ERROR_CODE 64
static char *ccp_error_codes[] = {
"",
- "ERR 01: ILLEGAL_ENGINE",
- "ERR 02: ILLEGAL_KEY_ID",
- "ERR 03: ILLEGAL_FUNCTION_TYPE",
- "ERR 04: ILLEGAL_FUNCTION_MODE",
- "ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
- "ERR 06: ILLEGAL_FUNCTION_SIZE",
- "ERR 07: Zlib_MISSING_INIT_EOM",
- "ERR 08: ILLEGAL_FUNCTION_RSVD",
- "ERR 09: ILLEGAL_BUFFER_LENGTH",
- "ERR 10: VLSB_FAULT",
- "ERR 11: ILLEGAL_MEM_ADDR",
- "ERR 12: ILLEGAL_MEM_SEL",
- "ERR 13: ILLEGAL_CONTEXT_ID",
- "ERR 14: ILLEGAL_KEY_ADDR",
- "ERR 15: 0xF Reserved",
- "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
- "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
- "ERR 18: CMD_TIMEOUT",
- "ERR 19: IDMA0_AXI_SLVERR",
- "ERR 20: IDMA0_AXI_DECERR",
- "ERR 21: 0x15 Reserved",
- "ERR 22: IDMA1_AXI_SLAVE_FAULT",
- "ERR 23: IDMA1_AIXI_DECERR",
- "ERR 24: 0x18 Reserved",
- "ERR 25: ZLIBVHB_AXI_SLVERR",
- "ERR 26: ZLIBVHB_AXI_DECERR",
- "ERR 27: 0x1B Reserved",
- "ERR 27: ZLIB_UNEXPECTED_EOM",
- "ERR 27: ZLIB_EXTRA_DATA",
- "ERR 30: ZLIB_BTYPE",
- "ERR 31: ZLIB_UNDEFINED_SYMBOL",
- "ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
- "ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
- "ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
- "ERR 35: ZLIB_UNCOMPRESSED_LEN",
- "ERR 36: ZLIB_LIMIT_REACHED",
- "ERR 37: ZLIB_CHECKSUM_MISMATCH0",
- "ERR 38: ODMA0_AXI_SLVERR",
- "ERR 39: ODMA0_AXI_DECERR",
- "ERR 40: 0x28 Reserved",
- "ERR 41: ODMA1_AXI_SLVERR",
- "ERR 42: ODMA1_AXI_DECERR",
- "ERR 43: LSB_PARITY_ERR",
+ "ILLEGAL_ENGINE",
+ "ILLEGAL_KEY_ID",
+ "ILLEGAL_FUNCTION_TYPE",
+ "ILLEGAL_FUNCTION_MODE",
+ "ILLEGAL_FUNCTION_ENCRYPT",
+ "ILLEGAL_FUNCTION_SIZE",
+ "Zlib_MISSING_INIT_EOM",
+ "ILLEGAL_FUNCTION_RSVD",
+ "ILLEGAL_BUFFER_LENGTH",
+ "VLSB_FAULT",
+ "ILLEGAL_MEM_ADDR",
+ "ILLEGAL_MEM_SEL",
+ "ILLEGAL_CONTEXT_ID",
+ "ILLEGAL_KEY_ADDR",
+ "0xF Reserved",
+ "Zlib_ILLEGAL_MULTI_QUEUE",
+ "Zlib_ILLEGAL_JOBID_CHANGE",
+ "CMD_TIMEOUT",
+ "IDMA0_AXI_SLVERR",
+ "IDMA0_AXI_DECERR",
+ "0x15 Reserved",
+ "IDMA1_AXI_SLAVE_FAULT",
+ "IDMA1_AIXI_DECERR",
+ "0x18 Reserved",
+ "ZLIBVHB_AXI_SLVERR",
+ "ZLIBVHB_AXI_DECERR",
+ "0x1B Reserved",
+ "ZLIB_UNEXPECTED_EOM",
+ "ZLIB_EXTRA_DATA",
+ "ZLIB_BTYPE",
+ "ZLIB_UNDEFINED_SYMBOL",
+ "ZLIB_UNDEFINED_DISTANCE_S",
+ "ZLIB_CODE_LENGTH_SYMBOL",
+ "ZLIB _VHB_ILLEGAL_FETCH",
+ "ZLIB_UNCOMPRESSED_LEN",
+ "ZLIB_LIMIT_REACHED",
+ "ZLIB_CHECKSUM_MISMATCH0",
+ "ODMA0_AXI_SLVERR",
+ "ODMA0_AXI_DECERR",
+ "0x28 Reserved",
+ "ODMA1_AXI_SLVERR",
+ "ODMA1_AXI_DECERR",
};
-void ccp_log_error(struct ccp_device *d, int e)
+void ccp_log_error(struct ccp_device *d, unsigned int e)
{
- dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
+ if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
+ return;
+
+ if (e < ARRAY_SIZE(ccp_error_codes))
+ dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
+ else
+ dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
}
/* List of CCPs, CCP count, read-write access lock, and access functions
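Annotation: the rewritten ccp_log_error() closes an out-of-bounds read; the old code indexed ccp_error_codes[] with an unchecked (and signed) error value. The new version takes an unsigned index, treats anything at or above CCP_MAX_ERROR_CODE as a driver bug, and prints a fallback for codes the table does not name. The same defensive-lookup shape in isolation (a hedged sketch, not the driver's exact table):

    #define MAX_CODE 64                     /* hard bound from the hardware spec */

    static const char * const names[] = {
            "", "ILLEGAL_ENGINE", "ILLEGAL_KEY_ID",
    };

    static const char *name_for(unsigned int e)
    {
            if (WARN_ON(e >= MAX_CODE))     /* out of spec: caller bug */
                    return NULL;
            if (e < ARRAY_SIZE(names))
                    return names[e];
            return "Unknown Error";         /* in spec, but not in the table */
    }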
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 9c190eba1ce6..f9949b5ba47a 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -614,7 +614,7 @@ struct ccp5_desc {
void ccp_add_device(struct ccp_device *ccp);
void ccp_del_device(struct ccp_device *ccp);
-extern void ccp_log_error(struct ccp_device *, int);
+extern void ccp_log_error(struct ccp_device *, unsigned int);
struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
bool ccp_queues_suspended(struct ccp_device *ccp);
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index f3542aede519..e22aee8054ba 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1230,6 +1230,9 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
int ret;
/* Error checks */
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
+ return -EINVAL;
+
if (!cmd_q->ccp->vdata->perform->des3)
return -EINVAL;
@@ -1306,8 +1309,6 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
* passthru option to convert from big endian to little endian.
*/
if (des3->mode != CCP_DES3_MODE_ECB) {
- u32 load_mode;
-
op.sb_ctx = cmd_q->sb_ctx;
ret = ccp_init_dm_workarea(&ctx, cmd_q,
@@ -1320,12 +1321,8 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
dm_offset = CCP_SB_BYTES - des3->iv_len;
ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);
- if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
- load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
- else
- load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
- load_mode);
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
@@ -1387,10 +1384,6 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
}
/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
- if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
- dm_offset = CCP_SB_BYTES - des3->iv_len;
- else
- dm_offset = 0;
ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
DES3_EDE_BLOCK_SIZE);
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 3aec408ebba4..7985e21db028 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -816,7 +816,11 @@ static void talitos_unregister_rng(struct device *dev)
* HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
*/
#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
+#ifdef CONFIG_CRYPTO_DEV_TALITOS_SEC2
#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
+#else
+#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
+#endif
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
struct talitos_ctx {
@@ -984,7 +988,6 @@ static void ipsec_esp_encrypt_done(struct device *dev,
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
void *icvdata;
edesc = container_of(desc, struct talitos_edesc, desc);
@@ -998,9 +1001,8 @@ static void ipsec_esp_encrypt_done(struct device *dev,
else
icvdata = &edesc->link_tbl[edesc->src_nents +
edesc->dst_nents + 2];
- sg = sg_last(areq->dst, edesc->dst_nents);
- memcpy((char *)sg_virt(sg) + sg->length - authsize,
- icvdata, authsize);
+ sg_pcopy_from_buffer(areq->dst, edesc->dst_nents ? : 1, icvdata,
+ authsize, areq->assoclen + areq->cryptlen);
}
kfree(edesc);
@@ -1016,7 +1018,6 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
char *oicv, *icv;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1026,9 +1027,18 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
ipsec_esp_unmap(dev, edesc, req);
if (!err) {
+ char icvdata[SHA512_DIGEST_SIZE];
+ int nents = edesc->dst_nents ? : 1;
+ unsigned int len = req->assoclen + req->cryptlen;
+
/* auth check */
- sg = sg_last(req->dst, edesc->dst_nents ? : 1);
- icv = (char *)sg_virt(sg) + sg->length - authsize;
+ if (nents > 1) {
+ sg_pcopy_to_buffer(req->dst, nents, icvdata, authsize,
+ len - authsize);
+ icv = icvdata;
+ } else {
+ icv = (char *)sg_virt(req->dst) + len - authsize;
+ }
if (edesc->dma_len) {
if (is_sec1)
@@ -1465,7 +1475,6 @@ static int aead_decrypt(struct aead_request *req)
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct talitos_private *priv = dev_get_drvdata(ctx->dev);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
void *icvdata;
req->cryptlen -= authsize;
@@ -1475,7 +1484,8 @@ static int aead_decrypt(struct aead_request *req)
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
+ if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
+ (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
((!edesc->src_nents && !edesc->dst_nents) ||
priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
@@ -1500,9 +1510,8 @@ static int aead_decrypt(struct aead_request *req)
else
icvdata = &edesc->link_tbl[0];
- sg = sg_last(req->src, edesc->src_nents ? : 1);
-
- memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
+ sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
+ req->assoclen + req->cryptlen - authsize);
return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
@@ -1655,6 +1664,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct talitos_edesc *edesc;
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+ if (!areq->nbytes)
+ return 0;
+
+ if (areq->nbytes % blocksize)
+ return -EINVAL;
/* allocate extended descriptor */
edesc = ablkcipher_edesc_alloc(areq, true);
@@ -1672,6 +1689,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct talitos_edesc *edesc;
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+ if (!areq->nbytes)
+ return 0;
+
+ if (areq->nbytes % blocksize)
+ return -EINVAL;
/* allocate extended descriptor */
edesc = ablkcipher_edesc_alloc(areq, false);
@@ -2192,7 +2217,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2236,7 +2261,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "authenc(hmac(sha1),"
"cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha1-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2278,7 +2303,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha224-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2322,7 +2347,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "authenc(hmac(sha224),"
"cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha224-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2364,7 +2389,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha256-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2408,7 +2433,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "authenc(hmac(sha256),"
"cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha256-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2534,7 +2559,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "authenc-hmac-md5-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2576,7 +2601,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-md5-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2630,7 +2655,7 @@ static struct talitos_alg_template driver_algs[] = {
.alg.crypto = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_ablkcipher = {
@@ -3118,7 +3143,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg->cra_priority = t_alg->algt.priority;
else
alg->cra_priority = TALITOS_CRA_PRIORITY;
- alg->cra_alignmask = 0;
+ if (has_ftr_sec1(priv))
+ alg->cra_alignmask = 3;
+ else
+ alg->cra_alignmask = 0;
alg->cra_ctxsize = sizeof(struct talitos_ctx);
alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
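Annotation: several talitos hunks above replace the sg_last()-plus-pointer-arithmetic ICV handling with sg_pcopy_to_buffer()/sg_pcopy_from_buffer(), which copy by absolute byte offset and therefore keep working when the authentication tag straddles scatterlist entries or the last entry is shorter than the tag. A minimal sketch of reading a tag that ends at offset len (assuming the usual AEAD layout of assoclen + cryptlen):

    #include <linux/scatterlist.h>

    /* Copy the trailing 'authsize' ICV bytes out of a scatterlist whose
     * relevant data ends at byte offset 'len'. */
    static void read_icv(struct scatterlist *sg, unsigned int nents,
                         void *icv, unsigned int authsize, unsigned int len)
    {
            sg_pcopy_to_buffer(sg, nents, icv, authsize, len - authsize);
    }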
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d9118ec23025..c251d46a3d46 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -63,6 +63,7 @@
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
+#include <linux/numa.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
@@ -388,7 +389,8 @@ EXPORT_SYMBOL(dma_issue_pending_all);
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
int node = dev_to_node(chan->device->dev);
- return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
+ return node == NUMA_NO_NODE ||
+ cpumask_test_cpu(cpu, cpumask_of_node(node));
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index d43f37d99c7d..1fd4f3eec995 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -408,7 +408,7 @@ static const struct dc_debug_options debug_defaults = {
.disable_clock_gate = true,
};
-struct clock_source *dce120_clock_source_create(
+static struct clock_source *dce120_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -431,14 +431,14 @@ struct clock_source *dce120_clock_source_create(
return NULL;
}
-void dce120_clock_source_destroy(struct clock_source **clk_src)
+static void dce120_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
}
-bool dce120_hw_sequencer_create(struct dc *dc)
+static bool dce120_hw_sequencer_create(struct dc *dc)
{
/* All registers used by dce11.2 match those in dce11 in offset and
* structure
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 04440064b9b7..e5b3ba73e661 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -382,7 +382,7 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
cfg |= ATMEL_HLCDC_LAYER_LAEN;
else
cfg |= ATMEL_HLCDC_LAYER_GAEN |
- ATMEL_HLCDC_LAYER_GA(state->base.alpha >> 8);
+ ATMEL_HLCDC_LAYER_GA(state->base.alpha);
}
if (state->disc_h && state->disc_w)
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index e2f775d1c112..21bec4548092 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1321,7 +1321,10 @@ static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
.size = from->buf_size,
.low_mark = from->low_mark,
.high_mark = from->high_mark};
- return copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags));
+
+ if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
+ return -EFAULT;
+ return 0;
}
int drm_legacy_infobufs(struct drm_device *dev, void *data,
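Annotation: the copy_one_buf() fix above (and its 32-bit twin below) corrects a classic misuse: copy_to_user() returns the number of bytes left uncopied, not an errno, so returning its result directly hands callers a positive count where they expect 0 or a negative error. The canonical conversion:

    if (copy_to_user(to, &v, sizeof(v)))
            return -EFAULT;         /* any nonzero remainder means a fault */
    return 0;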
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 67b1fca39aa6..138680b37c70 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -372,7 +372,10 @@ static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from)
.size = from->buf_size,
.low_mark = from->low_mark,
.high_mark = from->high_mark};
- return copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags));
+
+ if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags)))
+ return -EFAULT;
+ return 0;
}
static int drm_legacy_infobufs32(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 94003ca761f6..c014c1ba7e8a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7373,6 +7373,9 @@ enum {
#define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8)
#define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0)
+#define GEN8_L3CNTLREG _MMIO(0x7034)
+ #define GEN8_ERRDETBCTRL (1 << 9)
+
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 5bd42e3955b0..aa7bd9cd490a 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -488,8 +488,18 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
return 0;
}
+static void wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val);
+
static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
+ struct i915_wa_list *wal = &dev_priv->gt_wa_list;
+
+ /* WaDisableBankHangMode:icl */
+ wa_write(wal,
+ GEN8_L3CNTLREG,
+ I915_READ(GEN8_L3CNTLREG) |
+ GEN8_ERRDETBCTRL);
+
/* Wa_1604370585:icl (pre-prod)
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 12c80dfcff59..c7daae53fa1f 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -120,6 +120,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
OSD_COLOR_MATRIX_32_ARGB;
break;
+ case DRM_FORMAT_XBGR8888:
+ /* For XBGR, replace the pixel's alpha by 0xFF */
+ writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+ OSD_COLOR_MATRIX_32_ABGR;
+ break;
case DRM_FORMAT_ARGB8888:
/* For ARGB, use the pixel's alpha */
writel_bits_relaxed(OSD_REPLACE_EN, 0,
@@ -127,6 +134,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
OSD_COLOR_MATRIX_32_ARGB;
break;
+ case DRM_FORMAT_ABGR8888:
+ /* For ABGR, use the pixel's alpha */
+ writel_bits_relaxed(OSD_REPLACE_EN, 0,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+ OSD_COLOR_MATRIX_32_ABGR;
+ break;
case DRM_FORMAT_RGB888:
priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
OSD_COLOR_MATRIX_24_RGB;
@@ -196,7 +210,9 @@ static const struct drm_plane_funcs meson_plane_funcs = {
static const uint32_t supported_drm_formats[] = {
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGB565,
};
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 669c2d4b070d..5c068301d817 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -395,19 +395,17 @@ static const unsigned int a3xx_registers[] = {
0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
- 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
- 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
- 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
- 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
- 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
- 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
- 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
- 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
- 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
- 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
- 0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
- 0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
- 0x303c, 0x303c, 0x305e, 0x305f,
+ 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2440, 0x2440, 0x2444, 0x2444,
+ 0x2448, 0x244d, 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470,
+ 0x2472, 0x2472, 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3,
+ 0x24e4, 0x24ef, 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e,
+ 0x2510, 0x2511, 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea,
+ 0x25ec, 0x25ed, 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617,
+ 0x261a, 0x261a, 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0,
+ 0x26c4, 0x26ce, 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9,
+ 0x26ec, 0x26ec, 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743,
+ 0x300c, 0x300e, 0x301c, 0x301d, 0x302a, 0x302a, 0x302c, 0x302d,
+ 0x3030, 0x3031, 0x3034, 0x3036, 0x303c, 0x303c, 0x305e, 0x305f,
~0 /* sentinel */
};
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 824067d2d427..42f0ecb0cf35 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -635,7 +635,7 @@ fail:
if (cfg_handler)
mdp5_cfg_destroy(cfg_handler);
- return NULL;
+ return ERR_PTR(ret);
}
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index f8f9ae6622eb..873624a11ce8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -880,7 +880,8 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
struct vop *vop = to_vop(crtc);
adjusted_mode->clock =
- clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
+ DIV_ROUND_UP(clk_round_rate(vop->dclk, mode->clock * 1000),
+ 1000);
return true;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 239e1edf0919..745ea3b4e06f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -448,11 +448,11 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
if (unlikely(ret != 0))
return ret;
- ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
- vsgt->num_pages, 0,
- (unsigned long)
- vsgt->num_pages << PAGE_SHIFT,
- GFP_KERNEL);
+ ret = __sg_alloc_table_from_pages
+ (&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+ (unsigned long) vsgt->num_pages << PAGE_SHIFT,
+ dma_get_max_seg_size(dev_priv->dev->dev),
+ GFP_KERNEL);
if (unlikely(ret != 0))
goto out_sg_alloc_fail;
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 71fecc3e7685..0f45a9519516 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -307,18 +307,23 @@ static void wacom_feature_mapping(struct hid_device *hdev,
wacom_hid_usage_quirk(hdev, field, usage);
switch (equivalent_usage) {
+ case WACOM_HID_WD_TOUCH_RING_SETTING:
+ wacom->generic_has_leds = true;
+ break;
case HID_DG_CONTACTMAX:
/* leave touch_max as is if predefined */
if (!features->touch_max) {
/* read manually */
- data = kzalloc(2, GFP_KERNEL);
+ n = hid_report_len(field->report);
+ data = hid_alloc_report_buf(field->report, GFP_KERNEL);
if (!data)
break;
data[0] = field->report->id;
ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
- data, 2, WAC_CMD_RETRIES);
- if (ret == 2) {
- features->touch_max = data[1];
+ data, n, WAC_CMD_RETRIES);
+ if (ret == n) {
+ ret = hid_report_raw_event(hdev,
+ HID_FEATURE_REPORT, data, n, 0);
} else {
features->touch_max = 16;
hid_warn(hdev, "wacom_feature_mapping: "
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 09b8e4aac82f..bde3416cce7e 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1930,8 +1930,6 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
case WACOM_HID_WD_BUTTONCENTER:
- wacom->generic_has_leds = true;
- /* fall through */
case WACOM_HID_WD_BUTTONHOME:
case WACOM_HID_WD_BUTTONUP:
case WACOM_HID_WD_BUTTONDOWN:
@@ -2123,14 +2121,12 @@ static void wacom_wac_pad_report(struct hid_device *hdev,
bool active = wacom_wac->hid_data.inrange_state != 0;
/* report prox for expresskey events */
- if ((wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) &&
- wacom_wac->hid_data.pad_input_event_flag) {
+ if (wacom_wac->hid_data.pad_input_event_flag) {
input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
input_sync(input);
if (!active)
wacom_wac->hid_data.pad_input_event_flag = false;
}
-
}
static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
@@ -2516,6 +2512,7 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+ struct wacom_features *features = &wacom->wacom_wac.features;
switch (equivalent_usage) {
case HID_GD_X:
@@ -2536,6 +2533,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
case HID_DG_TIPSWITCH:
wacom_wac->hid_data.tipswitch = value;
break;
+ case HID_DG_CONTACTMAX:
+ features->touch_max = value;
+ return;
}
@@ -2706,9 +2706,7 @@ static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *repo
if (report->type != HID_INPUT_REPORT)
return -1;
- if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
- wacom_wac_pad_report(hdev, report, field);
- else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
wacom_wac_pen_report(hdev, report);
else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
wacom_wac_finger_report(hdev, report);
@@ -2722,7 +2720,7 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct hid_field *field;
bool pad_in_hid_field = false, pen_in_hid_field = false,
- finger_in_hid_field = false;
+ finger_in_hid_field = false, true_pad = false;
int r;
int prev_collection = -1;
@@ -2738,6 +2736,8 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
pen_in_hid_field = true;
if (WACOM_FINGER_FIELD(field))
finger_in_hid_field = true;
+ if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY)
+ true_pad = true;
}
wacom_wac_battery_pre_report(hdev, report);
@@ -2761,6 +2761,9 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
}
wacom_wac_battery_report(hdev, report);
+
+ if (true_pad && wacom->wacom_wac.pad_input)
+ wacom_wac_pad_report(hdev, report, field);
}
static int wacom_bpt_touch(struct wacom_wac *wacom)
@@ -3717,7 +3720,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
0, 5920, 4, 0);
}
input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
- input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_Y, 40);
/* fall through */
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 295fd3718caa..f67d871841c0 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -145,6 +145,7 @@
#define WACOM_HID_WD_OFFSETBOTTOM (WACOM_HID_UP_WACOMDIGITIZER | 0x0d33)
#define WACOM_HID_WD_DATAMODE (WACOM_HID_UP_WACOMDIGITIZER | 0x1002)
#define WACOM_HID_WD_DIGITIZERINFO (WACOM_HID_UP_WACOMDIGITIZER | 0x1013)
+#define WACOM_HID_WD_TOUCH_RING_SETTING (WACOM_HID_UP_WACOMDIGITIZER | 0x1032)
#define WACOM_HID_UP_G9 0xff090000
#define WACOM_HID_G9_PEN (WACOM_HID_UP_G9 | 0x02)
#define WACOM_HID_G9_TOUCHSCREEN (WACOM_HID_UP_G9 | 0x11)
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 36016c09dd96..6ebf6a2edb33 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -640,7 +640,7 @@ static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
goto err_out;
ret = -ENOMEM;
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
if (!page)
goto err_free_sgt;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 68c9201b0677..a069926a3ff8 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -39,6 +39,7 @@
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
+#include <linux/numa.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>
@@ -477,7 +478,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
if (!node_online(node))
- node = -1;
+ node = NUMA_NO_NODE;
drhd->iommu->node = node;
return 0;
}
@@ -1062,7 +1063,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->msagaw = msagaw;
iommu->segment = drhd->segment;
- iommu->node = -1;
+ iommu->node = NUMA_NO_NODE;
ver = readl(iommu->reg + DMAR_VER_REG);
pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index dd8697a10219..40ad32762b38 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -46,6 +46,7 @@
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
+#include <linux/numa.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -1836,7 +1837,7 @@ static struct dmar_domain *alloc_domain(int flags)
return NULL;
memset(domain, 0, sizeof(*domain));
- domain->nid = -1;
+ domain->nid = NUMA_NO_NODE;
domain->flags = flags;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
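Annotation: the -1 to NUMA_NO_NODE conversions in dmaengine, dmar, and intel-iommu are purely cosmetic: NUMA_NO_NODE is defined as (-1) in <linux/numa.h>, so behaviour is unchanged and the sentinel just becomes self-describing. Typical consumer-side usage (a hedged sketch):

    #include <linux/numa.h>

    int node = dev_to_node(dev);            /* may be NUMA_NO_NODE */

    if (node == NUMA_NO_NODE)
            node = numa_mem_id();           /* fall back to the local node */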
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 56a06375978e..b7bdb0600852 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -326,10 +326,11 @@ static int bch_allocator_thread(void *arg)
* possibly issue discards to them, then we add the bucket to
* the free list:
*/
- while (!fifo_empty(&ca->free_inc)) {
+ while (1) {
long bucket;
- fifo_pop(&ca->free_inc, bucket);
+ if (!fifo_pop(&ca->free_inc, bucket))
+ break;
if (ca->discard) {
mutex_unlock(&ca->set->bucket_lock);
@@ -391,6 +392,11 @@ long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
struct bucket *b;
long r;
+
+ /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
+ return -1;
+
/* fastpath */
if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
fifo_pop(&ca->free[reserve], r))
@@ -482,6 +488,10 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
{
int i;
+ /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+ return -1;
+
lockdep_assert_held(&c->bucket_lock);
BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 2f6c60d140b3..bab49ef2a922 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -704,8 +704,8 @@ struct cache_set {
atomic_long_t writeback_keys_failed;
atomic_long_t reclaim;
+ atomic_long_t reclaimed_journal_buckets;
atomic_long_t flush_write;
- atomic_long_t retry_flush_write;
enum {
ON_ERROR_UNREGISTER,
@@ -725,8 +725,6 @@ struct cache_set {
#define BUCKET_HASH_BITS 12
struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
-
- DECLARE_HEAP(struct btree *, flush_btree);
};
struct bbio {
@@ -1005,7 +1003,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size);
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
uint8_t *set_uuid);
void bch_cached_dev_detach(struct cached_dev *dc);
-void bch_cached_dev_run(struct cached_dev *dc);
+int bch_cached_dev_run(struct cached_dev *dc);
void bcache_device_stop(struct bcache_device *d);
void bch_cache_set_unregister(struct cache_set *c);
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 1b895b4fed60..cd92e6205225 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -346,22 +346,19 @@ EXPORT_SYMBOL(bch_btree_keys_alloc);
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
bool *expensive_debug_checks)
{
- unsigned int i;
-
b->ops = ops;
b->expensive_debug_checks = expensive_debug_checks;
b->nsets = 0;
b->last_set_unwritten = 0;
- /* XXX: shouldn't be needed */
- for (i = 0; i < MAX_BSETS; i++)
- b->set[i].size = 0;
/*
- * Second loop starts at 1 because b->keys[0]->data is the memory we
- * allocated
+ * struct btree_keys is embedded in struct btree, and struct
+ * bset_tree is embedded in struct btree_keys. They are all
+ * initialized as 0 by kzalloc() in mca_bucket_alloc(), and
+ * b->set[0].data is allocated in bch_btree_keys_alloc(), so we
+ * don't have to initialize b->set[].size and b->set[].data here
+ * any more.
*/
- for (i = 1; i < MAX_BSETS; i++)
- b->set[i].data = NULL;
}
EXPORT_SYMBOL(bch_btree_keys_init);
@@ -886,12 +883,22 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
struct bset *i = bset_tree_last(b)->data;
struct bkey *m, *prev = NULL;
struct btree_iter iter;
+ struct bkey preceding_key_on_stack = ZERO_KEY;
+ struct bkey *preceding_key_p = &preceding_key_on_stack;
BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
- m = bch_btree_iter_init(b, &iter, b->ops->is_extents
- ? PRECEDING_KEY(&START_KEY(k))
- : PRECEDING_KEY(k));
+ /*
+ * If k has a preceding key, preceding_key_p will be set to the
+ * address of k's preceding key; otherwise preceding_key_p will
+ * be set to NULL inside preceding_key().
+ */
+ if (b->ops->is_extents)
+ preceding_key(&START_KEY(k), &preceding_key_p);
+ else
+ preceding_key(k, &preceding_key_p);
+
+ m = bch_btree_iter_init(b, &iter, preceding_key_p);
if (b->ops->insert_fixup(b, k, &iter, replace_key))
return status;
@@ -959,45 +966,25 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
unsigned int inorder, j, n = 1;
do {
- /*
- * A bit trick here.
- * If p < t->size, (int)(p - t->size) is a minus value and
- * the most significant bit is set, right shifting 31 bits
- * gets 1. If p >= t->size, the most significant bit is
- * not set, right shifting 31 bits gets 0.
- * So the following 2 lines equals to
- * if (p >= t->size)
- * p = 0;
- * but a branch instruction is avoided.
- */
unsigned int p = n << 4;
- p &= ((int) (p - t->size)) >> 31;
-
- prefetch(&t->tree[p]);
+ if (p < t->size)
+ prefetch(&t->tree[p]);
j = n;
f = &t->tree[j];
- /*
- * Similar bit trick, use subtract operation to avoid a branch
- * instruction.
- *
- * n = (f->mantissa > bfloat_mantissa())
- * ? j * 2
- * : j * 2 + 1;
- *
- * We need to subtract 1 from f->mantissa for the sign bit trick
- * to work - that's done in make_bfloat()
- */
- if (likely(f->exponent != 127))
- n = j * 2 + (((unsigned int)
- (f->mantissa -
- bfloat_mantissa(search, f))) >> 31);
- else
- n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
- ? j * 2
- : j * 2 + 1;
+ if (likely(f->exponent != 127)) {
+ if (f->mantissa >= bfloat_mantissa(search, f))
+ n = j * 2;
+ else
+ n = j * 2 + 1;
+ } else {
+ if (bkey_cmp(tree_to_bkey(t, j), search) > 0)
+ n = j * 2;
+ else
+ n = j * 2 + 1;
+ }
} while (n < t->size);
inorder = to_inorder(j, t);
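Annotation: the bset_search_tree() rewrite above drops two branch-avoidance bit tricks (masking the prefetch index with a sign-extended subtraction, and deriving the child index from the sign bit of mantissa arithmetic) in favour of plain comparisons; modern branch predictors handle the pattern well, and the readable form is far easier to audit. Stripped of the bcache types, the descent is an ordinary implicit binary tree walk (key_at() is a hypothetical comparison helper):

    unsigned int n = 1;

    while (n < size) {
            if (key_at(n) >= search)        /* hypothetical comparison */
                    n = n * 2;              /* descend left  */
            else
                    n = n * 2 + 1;          /* descend right */
    }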
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index fe584622d8dc..fe331b02ba16 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -433,20 +433,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
return __bch_cut_back(where, k);
}
-#define PRECEDING_KEY(_k) \
-({ \
- struct bkey *_ret = NULL; \
- \
- if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
- _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
- \
- if (!_ret->low) \
- _ret->high--; \
- _ret->low--; \
- } \
- \
- _ret; \
-})
+/*
+ * '*preceding_key_p' points to a memory object that receives the key
+ * preceding k. If no preceding key exists, '*preceding_key_p' is set
+ * to NULL, so the caller of preceding_key() must manage the memory
+ * that '*preceding_key_p' points to before calling preceding_key().
+ * Currently the only caller is bch_btree_insert_key(), which passes a
+ * pointer to an on-stack variable, so releasing the memory is handled
+ * by the stack frame itself.
+ */
+static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
+{
+ if (KEY_INODE(k) || KEY_OFFSET(k)) {
+ (**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
+ if (!(*preceding_key_p)->low)
+ (*preceding_key_p)->high--;
+ (*preceding_key_p)->low--;
+ } else {
+ (*preceding_key_p) = NULL;
+ }
+}
static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
{
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 18d85934a5e8..c356e29780a5 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -34,7 +34,7 @@
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/rculist.h>
-
+#include <linux/delay.h>
#include <trace/events/bcache.h>
/*
@@ -611,6 +611,10 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
static struct btree *mca_bucket_alloc(struct cache_set *c,
struct bkey *k, gfp_t gfp)
{
+ /*
+ * kzalloc() is necessary here for initialization,
+ * see code comments in bch_btree_keys_init().
+ */
struct btree *b = kzalloc(sizeof(struct btree), gfp);
if (!b)
@@ -653,7 +657,25 @@ static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
up(&b->io_mutex);
}
+retry:
+ /*
+ * BTREE_NODE_dirty might be cleared in btree_flush_write() by
+ * __bch_btree_node_write(). To avoid an extra flush, acquire
+ * b->write_lock before checking the BTREE_NODE_dirty bit.
+ */
mutex_lock(&b->write_lock);
+ /*
+ * If this btree node is selected in btree_flush_write() by the journal
+ * code, delay and retry until the node is flushed by the journal code
+ * and the BTREE_NODE_journal_flush bit is cleared by btree_flush_write().
+ */
+ if (btree_node_journal_flush(b)) {
+ pr_debug("bnode %p is flushing by journal, retry", b);
+ mutex_unlock(&b->write_lock);
+ udelay(1);
+ goto retry;
+ }
+
if (btree_node_dirty(b))
__bch_btree_node_write(b, &cl);
mutex_unlock(&b->write_lock);
@@ -776,10 +798,15 @@ void bch_btree_cache_free(struct cache_set *c)
while (!list_empty(&c->btree_cache)) {
b = list_first_entry(&c->btree_cache, struct btree, list);
- if (btree_node_dirty(b))
+ /*
+ * This function is called by cache_set_free(); there is no I/O
+ * request on the cache now, so it is unnecessary to acquire
+ * b->write_lock before clearing BTREE_NODE_dirty anymore.
+ */
+ if (btree_node_dirty(b)) {
btree_complete_write(b, btree_current_write(b));
- clear_bit(BTREE_NODE_dirty, &b->flags);
-
+ clear_bit(BTREE_NODE_dirty, &b->flags);
+ }
mca_data_free(b);
}
@@ -1065,11 +1092,25 @@ static void btree_node_free(struct btree *b)
BUG_ON(b == b->c->root);
+retry:
mutex_lock(&b->write_lock);
+ /*
+ * If the btree node is selected and being flushed in btree_flush_write(),
+ * delay and retry until the BTREE_NODE_journal_flush bit is cleared;
+ * then it is safe to free the btree node here. Otherwise this btree
+ * node would be caught in a race condition.
+ */
+ if (btree_node_journal_flush(b)) {
+ mutex_unlock(&b->write_lock);
+ pr_debug("bnode %p journal_flush set, retry", b);
+ udelay(1);
+ goto retry;
+ }
- if (btree_node_dirty(b))
+ if (btree_node_dirty(b)) {
btree_complete_write(b, btree_current_write(b));
- clear_bit(BTREE_NODE_dirty, &b->flags);
+ clear_bit(BTREE_NODE_dirty, &b->flags);
+ }
mutex_unlock(&b->write_lock);
@@ -1474,11 +1515,11 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
out_nocoalesce:
closure_sync(&cl);
- bch_keylist_free(&keylist);
while ((k = bch_keylist_pop(&keylist)))
if (!bkey_cmp(k, &ZERO_KEY))
atomic_dec(&b->c->prio_blocked);
+ bch_keylist_free(&keylist);
for (i = 0; i < nodes; i++)
if (!IS_ERR_OR_NULL(new_nodes[i])) {
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index e9432b4d33a8..15c1c6cd826e 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -157,11 +157,13 @@ enum btree_flags {
BTREE_NODE_io_error,
BTREE_NODE_dirty,
BTREE_NODE_write_idx,
+ BTREE_NODE_journal_flush,
};
BTREE_FLAG(io_error);
BTREE_FLAG(dirty);
BTREE_FLAG(write_idx);
+BTREE_FLAG(journal_flush);
static inline struct btree_write *btree_current_write(struct btree *b)
{
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 47b7e54180ed..71b5a3cda04d 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -57,6 +57,18 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
+ /*
+ * Read-ahead requests on a degrading and recovering md raid
+ * (e.g. raid6) device might be failed immediately by the md
+ * raid code, which is not a real hardware media failure. So
+ * we shouldn't count a failed REQ_RAHEAD bio in dc->io_errors.
+ */
+ if (bio->bi_opf & REQ_RAHEAD) {
+ pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore",
+ dc->backing_dev_name);
+ return;
+ }
+
errors = atomic_add_return(1, &dc->io_errors);
if (errors < dc->error_limit)
pr_err("%s: IO error on backing device, unrecoverable",
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 5cf9df00eefc..986cc59b3843 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -99,6 +99,20 @@ reread: left = ca->sb.bucket_size - offset;
blocks = set_blocks(j, block_bytes(ca->set));
+ /*
+ * Nodes in 'list' are in increasing order of
+ * i->j.seq: the node at the head has the smallest
+ * (oldest) journal seq, the node at the tail has
+ * the largest (latest) journal seq.
+ */
+
+ /*
+ * Check from the oldest jset for last_seq. If
+ * i->j.seq < j->last_seq, it means the oldest jset
+ * in the list is expired and useless, so remove it
+ * from the list. Otherwise, j is a candidate jset
+ * for the checks that follow.
+ */
while (!list_empty(list)) {
i = list_first_entry(list,
struct journal_replay, list);
@@ -108,13 +122,22 @@ reread: left = ca->sb.bucket_size - offset;
kfree(i);
}
+ /* iterate list in reverse order (from latest jset) */
list_for_each_entry_reverse(i, list, list) {
if (j->seq == i->j.seq)
goto next_set;
+ /*
+ * If j->seq is less than any i->j.last_seq
+ * in the list, j is an expired and useless jset.
+ */
if (j->seq < i->j.last_seq)
goto next_set;
+ /*
+ * 'where' points to the first jset in the list
+ * that is older than j.
+ */
if (j->seq > i->j.seq) {
where = &i->list;
goto add;
@@ -128,10 +151,12 @@ add:
if (!i)
return -ENOMEM;
memcpy(&i->j, j, bytes);
+ /* Insert after the entry 'where' points to */
list_add(&i->list, where);
ret = 1;
- ja->seq[bucket_index] = j->seq;
+ if (j->seq > ja->seq[bucket_index])
+ ja->seq[bucket_index] = j->seq;
next_set:
offset += blocks * ca->sb.block_size;
len -= blocks * ca->sb.block_size;
@@ -146,7 +171,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b) \
({ \
- int ret = journal_read_bucket(ca, list, b); \
+ ret = journal_read_bucket(ca, list, b); \
__set_bit(b, bitmap); \
if (ret < 0) \
return ret; \
@@ -155,6 +180,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
struct cache *ca;
unsigned int iter;
+ int ret = 0;
for_each_cache(ca, c, iter) {
struct journal_device *ja = &ca->journal;
@@ -316,6 +342,18 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
}
}
+static bool is_discard_enabled(struct cache_set *s)
+{
+ struct cache *ca;
+ unsigned int i;
+
+ for_each_cache(ca, s, i)
+ if (ca->discard)
+ return true;
+
+ return false;
+}
+
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
int ret = 0, keys = 0, entries = 0;
@@ -329,9 +367,17 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
list_for_each_entry(i, list, list) {
BUG_ON(i->pin && atomic_read(i->pin) != 1);
- cache_set_err_on(n != i->j.seq, s,
-"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
- n, i->j.seq - 1, start, end);
+ if (n != i->j.seq) {
+ if (n == start && is_discard_enabled(s))
+ pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
+ n, i->j.seq - 1, start, end);
+ else {
+ pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
+ n, i->j.seq - 1, start, end);
+ ret = -EIO;
+ goto err;
+ }
+ }
for (k = i->j.start;
k < bset_bkey_last(&i->j);
@@ -369,60 +415,90 @@ err:
}
/* Journalling */
-#define journal_max_cmp(l, r) \
- (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) < \
- fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
-#define journal_min_cmp(l, r) \
- (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) > \
- fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
static void btree_flush_write(struct cache_set *c)
{
- /*
- * Try to find the btree node with that references the oldest journal
- * entry, best is our current candidate and is locked if non NULL:
- */
- struct btree *b;
- int i;
+ struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
+ unsigned int i, n;
+
+ if (c->journal.btree_flushing)
+ return;
+
+ spin_lock(&c->journal.flush_write_lock);
+ if (c->journal.btree_flushing) {
+ spin_unlock(&c->journal.flush_write_lock);
+ return;
+ }
+ c->journal.btree_flushing = true;
+ spin_unlock(&c->journal.flush_write_lock);
atomic_long_inc(&c->flush_write);
+ memset(btree_nodes, 0, sizeof(btree_nodes));
+ n = 0;
-retry:
- spin_lock(&c->journal.lock);
- if (heap_empty(&c->flush_btree)) {
- for_each_cached_btree(b, c, i)
- if (btree_current_write(b)->journal) {
- if (!heap_full(&c->flush_btree))
- heap_add(&c->flush_btree, b,
- journal_max_cmp);
- else if (journal_max_cmp(b,
- heap_peek(&c->flush_btree))) {
- c->flush_btree.data[0] = b;
- heap_sift(&c->flush_btree, 0,
- journal_max_cmp);
- }
- }
+ mutex_lock(&c->bucket_lock);
+ list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+ if (btree_node_journal_flush(b))
+ pr_err("BUG: flush_write bit should not be set here!");
+
+ mutex_lock(&b->write_lock);
+
+ if (!btree_node_dirty(b)) {
+ mutex_unlock(&b->write_lock);
+ continue;
+ }
- for (i = c->flush_btree.used / 2 - 1; i >= 0; --i)
- heap_sift(&c->flush_btree, i, journal_min_cmp);
+ if (!btree_current_write(b)->journal) {
+ mutex_unlock(&b->write_lock);
+ continue;
+ }
+
+ set_btree_node_journal_flush(b);
+
+ mutex_unlock(&b->write_lock);
+
+ btree_nodes[n++] = b;
+ if (n == BTREE_FLUSH_NR)
+ break;
}
+ mutex_unlock(&c->bucket_lock);
- b = NULL;
- heap_pop(&c->flush_btree, b, journal_min_cmp);
- spin_unlock(&c->journal.lock);
+ for (i = 0; i < n; i++) {
+ b = btree_nodes[i];
+ if (!b) {
+ pr_err("BUG: btree_nodes[%d] is NULL", i);
+ continue;
+ }
+
+ /* safe to check without holding b->write_lock */
+ if (!btree_node_journal_flush(b)) {
+ pr_err("BUG: bnode %p: journal_flush bit cleaned", b);
+ continue;
+ }
- if (b) {
mutex_lock(&b->write_lock);
if (!btree_current_write(b)->journal) {
+ clear_bit(BTREE_NODE_journal_flush, &b->flags);
mutex_unlock(&b->write_lock);
- /* We raced */
- atomic_long_inc(&c->retry_flush_write);
- goto retry;
+ pr_debug("bnode %p: written by others", b);
+ continue;
+ }
+
+ if (!btree_node_dirty(b)) {
+ clear_bit(BTREE_NODE_journal_flush, &b->flags);
+ mutex_unlock(&b->write_lock);
+ pr_debug("bnode %p: dirty bit cleaned by others", b);
+ continue;
}
__bch_btree_node_write(b, NULL);
+ clear_bit(BTREE_NODE_journal_flush, &b->flags);
mutex_unlock(&b->write_lock);
}
+
+ spin_lock(&c->journal.flush_write_lock);
+ c->journal.btree_flushing = false;
+ spin_unlock(&c->journal.flush_write_lock);
}
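Annotation: the new btree_flush_write() replaces the heap-based candidate selection (and its retry-on-race counter) with one pass over c->btree_cache that marks up to BTREE_FLUSH_NR dirty nodes with BTREE_NODE_journal_flush and writes them out; the btree_flushing flag guarded by flush_write_lock ensures a single flusher at a time. That gate is the classic double-checked flag, sketched here as a standalone helper (name hypothetical):

    /* Return true if the caller won the right to flush. */
    static bool try_become_flusher(struct journal *j)
    {
            if (j->btree_flushing)          /* cheap unlocked fast path */
                    return false;

            spin_lock(&j->flush_write_lock);
            if (j->btree_flushing) {        /* recheck under the lock */
                    spin_unlock(&j->flush_write_lock);
                    return false;
            }
            j->btree_flushing = true;
            spin_unlock(&j->flush_write_lock);
            return true;
    }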
#define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1)
@@ -537,13 +613,14 @@ static void journal_reclaim(struct cache_set *c)
k->ptr[n++] = MAKE_PTR(0,
bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
ca->sb.nr_this_dev);
+ atomic_long_inc(&c->reclaimed_journal_buckets);
}
- bkey_init(k);
- SET_KEY_PTRS(k, n);
-
- if (n)
+ if (n) {
+ bkey_init(k);
+ SET_KEY_PTRS(k, n);
c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+ }
out:
if (!journal_full(&c->journal))
__closure_wake_up(&c->journal.wait);
@@ -670,6 +747,9 @@ static void journal_write_unlocked(struct closure *cl)
ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
}
+ /* If KEY_PTRS(k) == 0, this jset would simply be lost */
+ BUG_ON(i == 0);
+
atomic_dec_bug(&fifo_back(&c->journal.pin));
bch_journal_next(&c->journal);
journal_reclaim(c);
@@ -786,6 +866,10 @@ atomic_t *bch_journal(struct cache_set *c,
struct journal_write *w;
atomic_t *ret;
+ /* No journaling if CACHE_SET_IO_DISABLE set already */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+ return NULL;
+
if (!CACHE_SYNC(&c->sb))
return NULL;
@@ -830,7 +914,6 @@ void bch_journal_free(struct cache_set *c)
free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
free_fifo(&c->journal.pin);
- free_heap(&c->flush_btree);
}
int bch_journal_alloc(struct cache_set *c)
@@ -838,6 +921,7 @@ int bch_journal_alloc(struct cache_set *c)
struct journal *j = &c->journal;
spin_lock_init(&j->lock);
+ spin_lock_init(&j->flush_write_lock);
INIT_DELAYED_WORK(&j->work, journal_write_work);
c->journal_delay_ms = 100;
@@ -845,8 +929,7 @@ int bch_journal_alloc(struct cache_set *c)
j->w[0].c = c;
j->w[1].c = c;
- if (!(init_heap(&c->flush_btree, 128, GFP_KERNEL)) ||
- !(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
+ if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
!(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
!(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
return -ENOMEM;
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index 85f0d0fe904d..703ecabc7208 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -102,6 +102,8 @@ struct journal_write {
/* Embedded in struct cache_set */
struct journal {
spinlock_t lock;
+ spinlock_t flush_write_lock;
+ bool btree_flushing;
/* used when waiting because the journal was full */
struct closure_waitlist wait;
struct closure io;
@@ -153,6 +155,8 @@ struct journal_device {
struct bio_vec bv[8];
};
+#define BTREE_FLUSH_NR 8
+
#define journal_pin_cmp(c, l, r) \
(fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r)))
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 69a3612a66b9..37076a97a878 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -328,12 +328,13 @@ void bch_data_insert(struct closure *cl)
bch_data_insert_start(cl);
}
-/* Congested? */
-
-unsigned int bch_get_congested(struct cache_set *c)
+/*
+ * Congested? Return 0 (not congested) or the limit (in sectors)
+ * beyond which we should bypass the cache due to congestion.
+ */
+unsigned int bch_get_congested(const struct cache_set *c)
{
int i;
- long rand;
if (!c->congested_read_threshold_us &&
!c->congested_write_threshold_us)
@@ -352,8 +353,7 @@ unsigned int bch_get_congested(struct cache_set *c)
if (i > 0)
i = fract_exp_two(i, 6);
- rand = get_random_int();
- i -= bitmap_weight(&rand, BITS_PER_LONG);
+ i -= hweight32(get_random_u32());
return i > 0 ? i : 1;
}
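hweight32() of a uniformly random 32-bit word is binomially distributed with mean 16, so the subtraction applies a bounded random jitter (0..32, typically around 16) to the congestion limit, while dropping the old long-sized bitmap_weight() on a value that only ever had 32 random bits. A self-contained userspace model of just this jitter (illustrative code, not from the patch):

    #include <stdio.h>
    #include <stdlib.h>

    /* Model of "i -= hweight32(get_random_u32())": each of the 32 bits
     * is set with probability 1/2, so the popcount averages 16. */
    int main(void)
    {
            unsigned long sum = 0;
            int n;

            srand(1);
            for (n = 0; n < 1000000; n++) {
                    unsigned int r = ((unsigned int)rand() << 16) ^ rand();
                    sum += __builtin_popcount(r);
            }
            printf("mean popcount: %f\n", (double)sum / n);
            return 0;
    }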
@@ -375,7 +375,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
struct cache_set *c = dc->disk.c;
unsigned int mode = cache_mode(dc);
- unsigned int sectors, congested = bch_get_congested(c);
+ unsigned int sectors, congested;
struct task_struct *task = current;
struct io *i;
@@ -411,6 +411,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
goto rescale;
}
+ congested = bch_get_congested(c);
if (!congested && !dc->sequential_cutoff)
goto rescale;
@@ -706,14 +707,14 @@ static void search_free(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
- atomic_dec(&s->d->c->search_inflight);
+ atomic_dec(&s->iop.c->search_inflight);
if (s->iop.bio)
bio_put(s->iop.bio);
bio_complete(s);
closure_debug_destroy(cl);
- mempool_free(s, s->d->c->search);
+ mempool_free(s, s->iop.c->search);
}
static inline struct search *search_alloc(struct bio *bio,
@@ -756,13 +757,13 @@ static void cached_dev_bio_complete(struct closure *cl)
struct search *s = container_of(cl, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
- search_free(cl);
cached_dev_put(dc);
+ search_free(cl);
}
/* Process reads */
-static void cached_dev_cache_miss_done(struct closure *cl)
+static void cached_dev_read_error_done(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
@@ -800,7 +801,22 @@ static void cached_dev_read_error(struct closure *cl)
closure_bio_submit(s->iop.c, bio, cl);
}
- continue_at(cl, cached_dev_cache_miss_done, NULL);
+ continue_at(cl, cached_dev_read_error_done, NULL);
+}
+
+static void cached_dev_cache_miss_done(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+ struct bcache_device *d = s->d;
+
+ if (s->iop.replace_collision)
+ bch_mark_cache_miss_collision(s->iop.c, s->d);
+
+ if (s->iop.bio)
+ bio_free_pages(s->iop.bio);
+
+ cached_dev_bio_complete(cl);
+ closure_put(&d->cl);
}
static void cached_dev_read_done(struct closure *cl)
@@ -833,6 +849,7 @@ static void cached_dev_read_done(struct closure *cl)
if (verify(dc) && s->recoverable && !s->read_dirty_data)
bch_data_verify(dc, s->orig_bio);
+ closure_get(&dc->disk.cl);
bio_complete(s);
if (s->iop.bio &&
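The closure_get(&dc->disk.cl) taken in cached_dev_read_done() is balanced by the closure_put(&d->cl) at the end of the new cached_dev_cache_miss_done() above; d is saved into a local first because cached_dev_bio_complete() ends up freeing the search. Note also that cached_dev_bio_complete() now drops the cached_dev reference before search_free() releases the search. A comment-style summary of the pairing as it appears in this patch:

    /*
     * cached_dev_read_done():       closure_get(&dc->disk.cl)
     * cached_dev_cache_miss_done(): cached_dev_bio_complete(cl);
     *                               closure_put(&d->cl)
     */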
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 4177c0437286..a4c8351c649c 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -32,7 +32,7 @@ struct data_insert_op {
BKEY_PADDED(replace_key);
};
-unsigned int bch_get_congested(struct cache_set *c);
+unsigned int bch_get_congested(const struct cache_set *c);
void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3f19f6067f94..5d0813850ecf 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -41,6 +41,7 @@ static const char invalid_uuid[] = {
static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
+bool bcache_is_reboot;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);
@@ -50,6 +51,7 @@ static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
struct workqueue_struct *bch_journal_wq;
+
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
/* limitation of partitions number on single bcache device */
#define BCACHE_MINORS 128
@@ -198,7 +200,9 @@ err:
static void write_bdev_super_endio(struct bio *bio)
{
struct cached_dev *dc = bio->bi_private;
- /* XXX: error checking */
+
+ if (bio->bi_status)
+ bch_count_backing_io_errors(dc, bio);
closure_put(&dc->sb_write);
}
@@ -663,6 +667,11 @@ static const struct block_device_operations bcache_ops = {
void bcache_device_stop(struct bcache_device *d)
{
if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
+ /*
+ * closure_fn set to
+ * - cached device: cached_dev_flush()
+ * - flash dev: flash_dev_flush()
+ */
closure_queue(&d->cl);
}
@@ -687,6 +696,7 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
{
unsigned int i;
struct cache *ca;
+ int ret;
for_each_cache(ca, d->c, i)
bd_link_disk_holder(ca->bdev, d->disk);
@@ -694,9 +704,13 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
snprintf(d->name, BCACHEDEVNAME_SIZE,
"%s%u", name, d->id);
- WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
- sysfs_create_link(&c->kobj, &d->kobj, d->name),
- "Couldn't create device <-> cache set symlinks");
+ ret = sysfs_create_link(&d->kobj, &c->kobj, "cache");
+ if (ret < 0)
+ pr_err("Couldn't create device -> cache set symlink");
+
+ ret = sysfs_create_link(&c->kobj, &d->kobj, d->name);
+ if (ret < 0)
+ pr_err("Couldn't create cache set -> device symlink");
clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}
@@ -906,7 +920,7 @@ static int cached_dev_status_update(void *arg)
}
-void bch_cached_dev_run(struct cached_dev *dc)
+int bch_cached_dev_run(struct cached_dev *dc)
{
struct bcache_device *d = &dc->disk;
char buf[SB_LABEL_SIZE + 1];
@@ -917,6 +931,12 @@ void bch_cached_dev_run(struct cached_dev *dc)
NULL,
};
+ if (dc->io_disable) {
+ pr_err("I/O disabled on cached dev %s",
+ dc->backing_dev_name);
+ return -EIO;
+ }
+
memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
buf[SB_LABEL_SIZE] = '\0';
env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
@@ -924,7 +944,9 @@ void bch_cached_dev_run(struct cached_dev *dc)
if (atomic_xchg(&dc->running, 1)) {
kfree(env[1]);
kfree(env[2]);
- return;
+ pr_info("cached dev %s is running already",
+ dc->backing_dev_name);
+ return -EBUSY;
}
if (!d->c &&
@@ -949,8 +971,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
kfree(env[2]);
if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
- sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
- pr_debug("error creating sysfs link");
+ sysfs_create_link(&disk_to_dev(d->disk)->kobj,
+ &d->kobj, "bcache")) {
+ pr_err("Couldn't create bcache dev <-> disk sysfs symlinks");
+ return -ENOMEM;
+ }
dc->status_update_thread = kthread_run(cached_dev_status_update,
dc, "bcache_status_update");
@@ -959,6 +984,8 @@ void bch_cached_dev_run(struct cached_dev *dc)
"continue to run without monitoring backing "
"device status");
}
+
+ return 0;
}
/*
@@ -996,7 +1023,6 @@ static void cached_dev_detach_finish(struct work_struct *w)
BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
BUG_ON(refcount_read(&dc->count));
- mutex_lock(&bch_register_lock);
if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
cancel_writeback_rate_update_dwork(dc);
@@ -1012,6 +1038,8 @@ static void cached_dev_detach_finish(struct work_struct *w)
bch_write_bdev_super(dc, &cl);
closure_sync(&cl);
+ mutex_lock(&bch_register_lock);
+
calc_cached_dev_sectors(dc->disk.c);
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
@@ -1054,6 +1082,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
struct uuid_entry *u;
struct cached_dev *exist_dc, *t;
+ int ret = 0;
if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
(!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
@@ -1153,6 +1182,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
down_write(&dc->writeback_lock);
if (bch_cached_dev_writeback_start(dc)) {
up_write(&dc->writeback_lock);
+ pr_err("Couldn't start writeback facilities for %s",
+ dc->disk.disk->disk_name);
return -ENOMEM;
}
@@ -1163,7 +1194,22 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
bch_sectors_dirty_init(&dc->disk);
- bch_cached_dev_run(dc);
+ ret = bch_cached_dev_run(dc);
+ if (ret && (ret != -EBUSY)) {
+ up_write(&dc->writeback_lock);
+ /*
+ * bch_register_lock is held, so bcache_device_stop() cannot
+ * be called directly. The kthread and kworker created
+ * earlier in bch_cached_dev_writeback_start() have to be
+ * stopped manually here.
+ */
+ kthread_stop(dc->writeback_thread);
+ cancel_writeback_rate_update_dwork(dc);
+ pr_err("Couldn't run cached device %s",
+ dc->backing_dev_name);
+ return ret;
+ }
+
bcache_device_link(&dc->disk, c, "bdev");
atomic_inc(&c->attached_dev_nr);
@@ -1177,6 +1223,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
return 0;
}
+/* When dc->disk.kobj released */
void bch_cached_dev_release(struct kobject *kobj)
{
struct cached_dev *dc = container_of(kobj, struct cached_dev,
@@ -1189,18 +1236,16 @@ static void cached_dev_free(struct closure *cl)
{
struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
- mutex_lock(&bch_register_lock);
-
if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
cancel_writeback_rate_update_dwork(dc);
if (!IS_ERR_OR_NULL(dc->writeback_thread))
kthread_stop(dc->writeback_thread);
- if (dc->writeback_write_wq)
- destroy_workqueue(dc->writeback_write_wq);
if (!IS_ERR_OR_NULL(dc->status_update_thread))
kthread_stop(dc->status_update_thread);
+ mutex_lock(&bch_register_lock);
+
if (atomic_read(&dc->running))
bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
bcache_device_free(&dc->disk);
@@ -1283,12 +1328,13 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
/* Cached device - bcache superblock */
-static void register_bdev(struct cache_sb *sb, struct page *sb_page,
+static int register_bdev(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev,
struct cached_dev *dc)
{
const char *err = "cannot allocate memory";
struct cache_set *c;
+ int ret = -ENOMEM;
bdevname(bdev, dc->backing_dev_name);
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
@@ -1318,17 +1364,23 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
bch_cached_dev_attach(dc, c, NULL);
if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
- BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
- bch_cached_dev_run(dc);
+ BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) {
+ err = "failed to run cached device";
+ ret = bch_cached_dev_run(dc);
+ if (ret)
+ goto err;
+ }
- return;
+ return 0;
err:
pr_notice("error %s: %s", dc->backing_dev_name, err);
bcache_device_stop(&dc->disk);
+ return ret;
}
/* Flash only volumes */
+/* When d->kobj released */
void bch_flash_dev_release(struct kobject *kobj)
{
struct bcache_device *d = container_of(kobj, struct bcache_device,
@@ -1434,8 +1486,6 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
bool bch_cached_dev_error(struct cached_dev *dc)
{
- struct cache_set *c;
-
if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
return false;
@@ -1446,21 +1496,6 @@ bool bch_cached_dev_error(struct cached_dev *dc)
pr_err("stop %s: too many IO errors on backing device %s\n",
dc->disk.disk->disk_name, dc->backing_dev_name);
- /*
- * If the cached device is still attached to a cache set,
- * even dc->io_disable is true and no more I/O requests
- * accepted, cache device internal I/O (writeback scan or
- * garbage collection) may still prevent bcache device from
- * being stopped. So here CACHE_SET_IO_DISABLE should be
- * set to c->flags too, to make the internal I/O to cache
- * device rejected and stopped immediately.
- * If c is NULL, that means the bcache device is not attached
- * to any cache set, then no CACHE_SET_IO_DISABLE bit to set.
- */
- c = dc->disk.c;
- if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
- pr_info("CACHE_SET_IO_DISABLE already set");
-
bcache_device_stop(&dc->disk);
return true;
}
@@ -1499,6 +1534,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
return true;
}
+/* When c->kobj released */
void bch_cache_set_release(struct kobject *kobj)
{
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
@@ -1519,6 +1555,7 @@ static void cache_set_free(struct closure *cl)
bch_btree_cache_free(c);
bch_journal_free(c);
+ mutex_lock(&bch_register_lock);
for_each_cache(ca, c, i)
if (ca) {
ca->set = NULL;
@@ -1541,7 +1578,6 @@ static void cache_set_free(struct closure *cl)
mempool_destroy(c->search);
kfree(c->devices);
- mutex_lock(&bch_register_lock);
list_del(&c->list);
mutex_unlock(&bch_register_lock);
@@ -1564,19 +1600,23 @@ static void cache_set_flush(struct closure *cl)
kobject_put(&c->internal);
kobject_del(&c->kobj);
- if (c->gc_thread)
+ if (!IS_ERR_OR_NULL(c->gc_thread))
kthread_stop(c->gc_thread);
if (!IS_ERR_OR_NULL(c->root))
list_add(&c->root->list, &c->btree_cache);
- /* Should skip this if we're unregistering because of an error */
- list_for_each_entry(b, &c->btree_cache, list) {
- mutex_lock(&b->write_lock);
- if (btree_node_dirty(b))
- __bch_btree_node_write(b, NULL);
- mutex_unlock(&b->write_lock);
- }
+ /*
+ * Avoid flushing cached btree nodes if the cache set is
+ * retiring due to too many I/O errors detected.
+ */
+ if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags))
+ list_for_each_entry(b, &c->btree_cache, list) {
+ mutex_lock(&b->write_lock);
+ if (btree_node_dirty(b))
+ __bch_btree_node_write(b, NULL);
+ mutex_unlock(&b->write_lock);
+ }
for_each_cache(ca, c, i)
if (ca->alloc_thread)
@@ -1680,6 +1720,7 @@ static void __cache_set_unregister(struct closure *cl)
void bch_cache_set_stop(struct cache_set *c)
{
if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
+ /* closure_fn set to __cache_set_unregister() */
closure_queue(&c->caching);
}
@@ -1786,13 +1827,15 @@ err:
return NULL;
}
-static void run_cache_set(struct cache_set *c)
+static int run_cache_set(struct cache_set *c)
{
const char *err = "cannot allocate memory";
struct cached_dev *dc, *t;
struct cache *ca;
struct closure cl;
unsigned int i;
+ LIST_HEAD(journal);
+ struct journal_replay *l;
closure_init_stack(&cl);
@@ -1801,7 +1844,6 @@ static void run_cache_set(struct cache_set *c)
set_gc_sectors(c);
if (CACHE_SYNC(&c->sb)) {
- LIST_HEAD(journal);
struct bkey *k;
struct jset *j;
@@ -1851,6 +1893,23 @@ static void run_cache_set(struct cache_set *c)
if (bch_btree_check(c))
goto err;
+ /*
+ * bch_btree_check() may occupy too much system memory, which
+ * hurts the performance of user space applications (e.g.
+ * databases). Shrink the mca cache memory proactively here
+ * to avoid competing for memory with user space workloads.
+ */
+ if (!c->shrinker_disabled) {
+ struct shrink_control sc;
+
+ sc.gfp_mask = GFP_KERNEL;
+ sc.nr_to_scan = c->btree_cache_used * c->btree_pages;
+ /* first run to clear b->accessed tag */
+ c->shrink.scan_objects(&c->shrink, &sc);
+ /* second run to reap non-accessed nodes */
+ c->shrink.scan_objects(&c->shrink, &sc);
+ }
+
bch_journal_mark(c, &journal);
bch_initial_gc_finish(c);
pr_debug("btree_check() done");
@@ -1880,7 +1939,9 @@ static void run_cache_set(struct cache_set *c)
if (j->version < BCACHE_JSET_VERSION_UUID)
__uuid_write(c);
- bch_journal_replay(c, &journal);
+ err = "bcache: replay journal failed";
+ if (bch_journal_replay(c, &journal))
+ goto err;
} else {
pr_notice("invalidating existing data");
@@ -1948,11 +2009,19 @@ static void run_cache_set(struct cache_set *c)
flash_devs_run(c);
set_bit(CACHE_SET_RUNNING, &c->flags);
- return;
+ return 0;
err:
+ while (!list_empty(&journal)) {
+ l = list_first_entry(&journal, struct journal_replay, list);
+ list_del(&l->list);
+ kfree(l);
+ }
+
closure_sync(&cl);
- /* XXX: test this, it's broken */
+
bch_cache_set_error(c, "%s", err);
+
+ return -EIO;
}
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
@@ -2016,8 +2085,11 @@ found:
ca->set->cache[ca->sb.nr_this_dev] = ca;
c->cache_by_alloc[c->caches_loaded++] = ca;
- if (c->caches_loaded == c->sb.nr_in_set)
- run_cache_set(c);
+ if (c->caches_loaded == c->sb.nr_in_set) {
+ err = "failed to run cache set";
+ if (run_cache_set(c) < 0)
+ goto err;
+ }
return NULL;
err:
@@ -2027,6 +2099,7 @@ err:
/* Cache device */
+/* When ca->kobj released */
void bch_cache_release(struct kobject *kobj)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
@@ -2191,6 +2264,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
ret = cache_alloc(ca);
if (ret != 0) {
+ /*
+ * If we failed here, ca->kobj is not initialized yet, so
+ * kobject_put() won't be called and bch_cache_release() never
+ * gets a chance to call blkdev_put() on the bdev. So we
+ * explicitly call blkdev_put() here.
+ */
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
if (ret == -ENOMEM)
err = "cache_alloc(): -ENOMEM";
@@ -2234,9 +2313,13 @@ err:
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
const char *buffer, size_t size);
+static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
+ struct kobj_attribute *attr,
+ const char *buffer, size_t size);
kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);
+kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup);
static bool bch_is_open_backing(struct block_device *bdev)
{
@@ -2274,7 +2357,7 @@ static bool bch_is_open(struct block_device *bdev)
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
const char *buffer, size_t size)
{
- ssize_t ret = size;
+ ssize_t ret = -EINVAL;
const char *err = "cannot allocate memory";
char *path = NULL;
struct cache_sb *sb = NULL;
@@ -2284,6 +2367,11 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!try_module_get(THIS_MODULE))
return -EBUSY;
+ /* For latest state of bcache_is_reboot */
+ smp_mb();
+ if (bcache_is_reboot)
+ return -EBUSY;
+
path = kstrndup(buffer, size, GFP_KERNEL);
if (!path)
goto err;
@@ -2308,7 +2396,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!IS_ERR(bdev))
bdput(bdev);
if (attr == &ksysfs_register_quiet)
- goto out;
+ goto quiet_out;
}
goto err;
}
@@ -2329,17 +2417,23 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
goto err_close;
mutex_lock(&bch_register_lock);
- register_bdev(sb, sb_page, bdev, dc);
+ ret = register_bdev(sb, sb_page, bdev, dc);
mutex_unlock(&bch_register_lock);
+ /* blkdev_put() will be called in cached_dev_free() */
+ if (ret < 0)
+ goto err;
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
goto err_close;
+ /* blkdev_put() will be called in bch_cache_release() */
if (register_cache(sb, sb_page, bdev, ca) != 0)
goto err;
}
+quiet_out:
+ ret = size;
out:
if (sb_page)
put_page(sb_page);
@@ -2352,12 +2446,64 @@ err_close:
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
pr_info("error %s: %s", path, err);
- ret = -EINVAL;
goto out;
}
+
+struct pdev {
+ struct list_head list;
+ struct cached_dev *dc;
+};
+
+static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
+ struct kobj_attribute *attr,
+ const char *buffer,
+ size_t size)
+{
+ LIST_HEAD(pending_devs);
+ ssize_t ret = size;
+ struct cached_dev *dc, *tdc;
+ struct pdev *pdev, *tpdev;
+ struct cache_set *c, *tc;
+
+ mutex_lock(&bch_register_lock);
+ list_for_each_entry_safe(dc, tdc, &uncached_devices, list) {
+ pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL);
+ if (!pdev)
+ break;
+ pdev->dc = dc;
+ list_add(&pdev->list, &pending_devs);
+ }
+
+ list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
+ char *pdev_set_uuid = pdev->dc->sb.set_uuid;
+ char *set_uuid = c->sb.uuid;
+
+ if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
+ list_del(&pdev->list);
+ kfree(pdev);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&bch_register_lock);
+
+ list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+ pr_info("delete pdev %p", pdev);
+ list_del(&pdev->list);
+ bcache_device_stop(&pdev->dc->disk);
+ kfree(pdev);
+ }
+
+ return ret;
+}
+
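Presumably (the root kobject registration itself is outside this hunk, but the attribute is added to the bcache root kobject's file list in bcache_init() below), the new write-only attribute shows up as pendings_cleanup under the bcache sysfs root, typically /sys/fs/bcache/. Writing any value to it stops backing devices that registered but whose cache set never appeared, instead of leaving them pending forever.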
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
+ if (bcache_is_reboot)
+ return NOTIFY_DONE;
+
if (code == SYS_DOWN ||
code == SYS_HALT ||
code == SYS_POWER_OFF) {
@@ -2370,22 +2516,57 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
mutex_lock(&bch_register_lock);
+ if (bcache_is_reboot)
+ goto out;
+
+ /* New registrations are rejected from now on */
+ bcache_is_reboot = true;
+ /*
+ * Make a registering caller (if any) on another CPU core
+ * see that bcache_is_reboot was set to true before it
+ * proceeds
+ */
+ smp_mb();
+
if (list_empty(&bch_cache_sets) &&
list_empty(&uncached_devices))
goto out;
+ mutex_unlock(&bch_register_lock);
+
pr_info("Stopping all devices:");
+ /*
+ * bch_register_lock is not held while calling
+ * bch_cache_set_stop() and bcache_device_stop(), to avoid
+ * a potential deadlock during reboot: the cache set and
+ * bcache device stopping paths acquire bch_register_lock
+ * too.
+ *
+ * We are safe here because bcache_is_reboot is already set
+ * to true, so register_bcache() will reject new
+ * registrations from now on. bcache_is_reboot also makes
+ * sure bcache_reboot() won't be re-entered by another
+ * thread, so there is no race in the following list
+ * iteration by list_for_each_entry_safe().
+ */
list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
bch_cache_set_stop(c);
list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
bcache_device_stop(&dc->disk);
+
+ /*
+ * Give other kthreads and kworkers an early chance to
+ * stop themselves
+ */
+ schedule();
+
/* What's a condition variable? */
while (1) {
- long timeout = start + 2 * HZ - jiffies;
+ long timeout = start + 10 * HZ - jiffies;
+ mutex_lock(&bch_register_lock);
stopped = list_empty(&bch_cache_sets) &&
list_empty(&uncached_devices);
@@ -2397,7 +2578,6 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
mutex_unlock(&bch_register_lock);
schedule_timeout(timeout);
- mutex_lock(&bch_register_lock);
}
finish_wait(&unregister_wait, &wait);
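The smp_mb() here and the one added in register_bcache() above are intended as a pair. A comment-style sketch of the ordering (my summary, not code from the patch):

    /*
     * CPU A: bcache_reboot()              CPU B: register_bcache()
     *   mutex_lock(&bch_register_lock);     smp_mb();
     *   bcache_is_reboot = true;            if (bcache_is_reboot)
     *   smp_mb();                                   return -EBUSY;
     *
     * A's full barrier publishes the store before device stopping
     * starts; B's barrier forces a fresh read of the flag before any
     * registration work begins.
     */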
@@ -2466,6 +2646,7 @@ static int __init bcache_init(void)
static const struct attribute *files[] = {
&ksysfs_register.attr,
&ksysfs_register_quiet.attr,
+ &ksysfs_pendings_cleanup.attr,
NULL
};
@@ -2501,6 +2682,8 @@ static int __init bcache_init(void)
bch_debug_init();
closure_debug_init();
+ bcache_is_reboot = false;
+
return 0;
err:
bcache_exit();
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 00095344a23d..1c19a4b652e1 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -15,33 +15,31 @@
#include <linux/sort.h>
#include <linux/sched/clock.h>
+extern bool bcache_is_reboot;
+
/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
"writethrough",
"writeback",
"writearound",
- "none",
- NULL
+ "none"
};
/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
"auto",
- "always",
- NULL
+ "always"
};
static const char * const cache_replacement_policies[] = {
"lru",
"fifo",
- "random",
- NULL
+ "random"
};
static const char * const error_actions[] = {
"unregister",
- "panic",
- NULL
+ "panic"
};
write_attribute(attach);
@@ -83,8 +81,8 @@ read_attribute(bset_tree_stats);
read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
+read_attribute(reclaimed_journal_buckets);
read_attribute(flush_write);
-read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
@@ -179,7 +177,7 @@ SHOW(__bch_cached_dev)
var_print(writeback_percent);
sysfs_hprint(writeback_rate,
wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
- sysfs_hprint(io_errors, atomic_read(&dc->io_errors));
+ sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors));
sysfs_printf(io_error_limit, "%i", dc->error_limit);
sysfs_printf(io_disable, "%i", dc->io_disable);
var_print(writeback_rate_update_seconds);
@@ -270,6 +268,10 @@ STORE(__cached_dev)
struct cache_set *c;
struct kobj_uevent_env *env;
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
#define d_strtoul(var) sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)
@@ -328,11 +330,14 @@ STORE(__cached_dev)
bch_cache_accounting_clear(&dc->accounting);
if (attr == &sysfs_running &&
- strtoul_or_return(buf))
- bch_cached_dev_run(dc);
+ strtoul_or_return(buf)) {
+ v = bch_cached_dev_run(dc);
+ if (v)
+ return v;
+ }
if (attr == &sysfs_cache_mode) {
- v = __sysfs_match_string(bch_cache_modes, -1, buf);
+ v = sysfs_match_string(bch_cache_modes, buf);
if (v < 0)
return v;
@@ -343,7 +348,7 @@ STORE(__cached_dev)
}
if (attr == &sysfs_stop_when_cache_set_failed) {
- v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
+ v = sysfs_match_string(bch_stop_on_failure_modes, buf);
if (v < 0)
return v;
@@ -407,6 +412,10 @@ STORE(bch_cached_dev)
struct cached_dev *dc = container_of(kobj, struct cached_dev,
disk.kobj);
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
mutex_lock(&bch_register_lock);
size = __cached_dev_store(kobj, attr, buf, size);
@@ -430,8 +439,13 @@ STORE(bch_cached_dev)
bch_writeback_queue(dc);
}
+ /*
+ * Only set BCACHE_DEV_WB_RUNNING when the cached device is
+ * attached to a cache set; otherwise it doesn't make sense.
+ */
if (attr == &sysfs_writeback_percent)
- if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
+ if ((dc->disk.c != NULL) &&
+ (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);
@@ -458,7 +472,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_rate_p_term_inverse,
&sysfs_writeback_rate_minimum,
&sysfs_writeback_rate_debug,
- &sysfs_errors,
+ &sysfs_io_errors,
&sysfs_io_error_limit,
&sysfs_io_disable,
&sysfs_dirty_data,
@@ -505,6 +519,10 @@ STORE(__bch_flash_dev)
kobj);
struct uuid_entry *u = &d->c->uuids[d->id];
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
sysfs_strtoul(data_csum, d->data_csum);
if (attr == &sysfs_size) {
@@ -687,12 +705,12 @@ SHOW(__bch_cache_set)
sysfs_print(reclaim,
atomic_long_read(&c->reclaim));
+ sysfs_print(reclaimed_journal_buckets,
+ atomic_long_read(&c->reclaimed_journal_buckets));
+
sysfs_print(flush_write,
atomic_long_read(&c->flush_write));
- sysfs_print(retry_flush_write,
- atomic_long_read(&c->retry_flush_write));
-
sysfs_print(writeback_keys_done,
atomic_long_read(&c->writeback_keys_done));
sysfs_print(writeback_keys_failed,
@@ -740,6 +758,10 @@ STORE(__bch_cache_set)
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
ssize_t v;
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
if (attr == &sysfs_unregister)
bch_cache_set_unregister(c);
@@ -793,7 +815,7 @@ STORE(__bch_cache_set)
0, UINT_MAX);
if (attr == &sysfs_errors) {
- v = __sysfs_match_string(error_actions, -1, buf);
+ v = sysfs_match_string(error_actions, buf);
if (v < 0)
return v;
@@ -859,6 +881,10 @@ STORE(bch_cache_set_internal)
{
struct cache_set *c = container_of(kobj, struct cache_set, internal);
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
return bch_cache_set_store(&c->kobj, attr, buf, size);
}
@@ -908,8 +934,8 @@ static struct attribute *bch_cache_set_internal_files[] = {
&sysfs_bset_tree_stats,
&sysfs_cache_read_races,
&sysfs_reclaim,
+ &sysfs_reclaimed_journal_buckets,
&sysfs_flush_write,
- &sysfs_retry_flush_write,
&sysfs_writeback_keys_done,
&sysfs_writeback_keys_failed,
@@ -994,8 +1020,6 @@ SHOW(__bch_cache)
!cached[n - 1])
--n;
- unused = ca->sb.nbuckets - n;
-
while (cached < p + n &&
*cached == BTREE_PRIO)
cached++, n--;
@@ -1045,6 +1069,10 @@ STORE(__bch_cache)
struct cache *ca = container_of(kobj, struct cache, kobj);
ssize_t v;
+ /* no user space access if system is rebooting */
+ if (bcache_is_reboot)
+ return -EBUSY;
+
if (attr == &sysfs_discard) {
bool v = strtoul_or_return(buf);
@@ -1058,7 +1086,7 @@ STORE(__bch_cache)
}
if (attr == &sysfs_cache_replacement_policy) {
- v = __sysfs_match_string(cache_replacement_policies, -1, buf);
+ v = sysfs_match_string(cache_replacement_policies, buf);
if (v < 0)
return v;
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index ae3dd69439a0..2bbda858f436 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -112,8 +112,6 @@ do { \
#define heap_full(h) ((h)->used == (h)->size)
-#define heap_empty(h) ((h)->used == 0)
-
#define DECLARE_FIFO(type, name) \
struct { \
size_t front, back, size, mask; \
@@ -559,17 +557,29 @@ static inline uint64_t bch_crc64_update(uint64_t crc,
return crc;
}
-/* Does linear interpolation between powers of two */
+/*
+ * A stepwise-linear pseudo-exponential. This returns 1 << (x >>
+ * fract_bits), with the less-significant bits filled in by linear
+ * interpolation.
+ *
+ * This can also be interpreted as a floating-point number format,
+ * where the low fract_bits are the mantissa (with implicit leading
+ * 1 bit), and the more significant bits are the exponent.
+ * The return value is 1.mantissa * 2^exponent.
+ *
+ * The way this is used, fract_bits is 6 and the largest possible
+ * input is CONGESTED_MAX-1 = 1023 (exponent 15, mantissa 0x1.fc),
+ * so the maximum output is 0xfe00.
+ */
static inline unsigned int fract_exp_two(unsigned int x,
unsigned int fract_bits)
{
- unsigned int fract = x & ~(~0 << fract_bits);
-
- x >>= fract_bits;
- x = 1 << x;
- x += (x * fract) >> fract_bits;
+ unsigned int mantissa = 1 << fract_bits; /* Implicit bit */
- return x;
+ mantissa += x & (mantissa - 1);
+ x >>= fract_bits; /* The exponent */
+ /* Largest intermediate value 0x3f8000 (127 << 15) */
+ return mantissa << x >> fract_bits;
}
void bch_bio_map(struct bio *bio, void *base);
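A runnable userspace model of the rewritten helper, for checking the numbers in the comment above (my code; assumes 32-bit unsigned int):

    #include <stdio.h>

    static unsigned int fract_exp_two(unsigned int x, unsigned int fract_bits)
    {
            unsigned int mantissa = 1 << fract_bits;        /* Implicit bit */

            mantissa += x & (mantissa - 1);
            x >>= fract_bits;                               /* The exponent */
            return mantissa << x >> fract_bits;
    }

    int main(void)
    {
            /* 1023: exponent 15, mantissa 0x7f, so 0x7f << 9 = 0xfe00 */
            printf("%#x\n", fract_exp_two(1023, 6));
            /* Exact powers of two whenever the low 6 bits are zero */
            printf("%u %u %u\n", fract_exp_two(0, 6),
                   fract_exp_two(64, 6), fract_exp_two(128, 6)); /* 1 2 4 */
            return 0;
    }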
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 072bd065c30f..9db234b6f253 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -121,6 +121,9 @@ static void __update_writeback_rate(struct cached_dev *dc)
static bool set_at_max_writeback_rate(struct cache_set *c,
struct cached_dev *dc)
{
+ /* Don't set max writeback rate if gc is running */
+ if (!c->gc_mark_valid)
+ return false;
/*
* Idle_counter is increased everytime when update_writeback_rate() is
* called. If all backing devices attached to the same cache set have
@@ -734,6 +737,10 @@ static int bch_writeback_thread(void *arg)
}
}
+ if (dc->writeback_write_wq) {
+ flush_workqueue(dc->writeback_write_wq);
+ destroy_workqueue(dc->writeback_write_wq);
+ }
cached_dev_put(dc);
wait_for_kthread_stop();
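With this change the flush and destruction of writeback_write_wq move from cached_dev_free() (where the destroy_workqueue() call is removed earlier in this patch) into the writeback thread's own exit path, so all queued writeback work is drained before the thread drops its reference with cached_dev_put(). The error path in bch_cached_dev_writeback_start() below destroys the workqueue itself because in that case the thread never runs to do it.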
@@ -829,6 +836,7 @@ int bch_cached_dev_writeback_start(struct cached_dev *dc)
"bcache_writeback");
if (IS_ERR(dc->writeback_thread)) {
cached_dev_put(dc);
+ destroy_workqueue(dc->writeback_write_wq);
return PTR_ERR(dc->writeback_thread);
}
dc->writeback_running = true;
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index a8bda6679422..8028ecdc157f 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -199,7 +199,6 @@ struct mcam_vb_buffer {
struct list_head queue;
struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
dma_addr_t dma_desc_pa; /* Descriptor physical address */
- int dma_desc_nent; /* Number of mapped descriptors */
};
static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_v4l2_buffer *vb)
@@ -606,9 +605,11 @@ static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
static void mcam_sg_next_buffer(struct mcam_camera *cam)
{
struct mcam_vb_buffer *buf;
+ struct sg_table *sg_table;
buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
list_del_init(&buf->queue);
+ sg_table = vb2_dma_sg_plane_desc(&buf->vb_buf.vb2_buf, 0);
/*
* Very Bad Not Good Things happen if you don't clear
* C1_DESC_ENA before making any descriptor changes.
@@ -616,7 +617,7 @@ static void mcam_sg_next_buffer(struct mcam_camera *cam)
mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
mcam_reg_write(cam, REG_DESC_LEN_Y,
- buf->dma_desc_nent*sizeof(struct mcam_dma_desc));
+ sg_table->nents * sizeof(struct mcam_dma_desc));
mcam_reg_write(cam, REG_DESC_LEN_U, 0);
mcam_reg_write(cam, REG_DESC_LEN_V, 0);
mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index eb85cedc5ef3..5e080f32b0e8 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -38,6 +38,11 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
for (i = 0; i < pm->num_clocks; i++) {
pm->clocks[i] = devm_clk_get(pm->device, pm->clk_names[i]);
if (IS_ERR(pm->clocks[i])) {
+ /* additional clocks are optional */
+ if (i && PTR_ERR(pm->clocks[i]) == -ENOENT) {
+ pm->clocks[i] = NULL;
+ continue;
+ }
mfc_err("Failed to get clock: %s\n",
pm->clk_names[i]);
return PTR_ERR(pm->clocks[i]);
diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c
index bdc380b14e0c..a95b7c56569e 100644
--- a/drivers/media/platform/vivid/vivid-osd.c
+++ b/drivers/media/platform/vivid/vivid-osd.c
@@ -167,7 +167,7 @@ static int _vivid_fb_check_var(struct fb_var_screeninfo *var, struct vivid_dev *
var->nonstd = 0;
var->vmode &= ~FB_VMODE_MASK;
- var->vmode = FB_VMODE_NONINTERLACED;
+ var->vmode |= FB_VMODE_NONINTERLACED;
/* Dummy values */
var->hsync_len = 24;
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index 1c7e16e5d88b..0d07730d4561 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -901,7 +901,6 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
cpia2_unregister_camera(cam);
v4l2_device_disconnect(&cam->v4l2_dev);
mutex_unlock(&cam->v4l2_lock);
- v4l2_device_put(&cam->v4l2_dev);
if(cam->buffers) {
DBG("Wakeup waiting processes\n");
@@ -913,6 +912,8 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
DBG("Releasing interface\n");
usb_driver_release_interface(&cpia2_driver, intf);
+ v4l2_device_put(&cam->v4l2_dev);
+
LOG("CPiA2 camera disconnected.\n");
}
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index bd49b698c02c..a74275168ef5 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -2109,16 +2109,15 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
&def, &flags);
- is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU ||
- cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU);
+ is_menu = (type == V4L2_CTRL_TYPE_MENU ||
+ type == V4L2_CTRL_TYPE_INTEGER_MENU);
if (is_menu)
WARN_ON(step);
else
WARN_ON(cfg->menu_skip_mask);
- if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL)
+ if (type == V4L2_CTRL_TYPE_MENU && !qmenu) {
qmenu = v4l2_ctrl_get_menu(cfg->id);
- else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU &&
- qmenu_int == NULL) {
+ } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
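v4l2_ctrl_fill(), called just above, fills in name/type/bounds from the control ID when the caller left them unset, so the local `type` is the authoritative value; testing the possibly-zero cfg->type could misclassify standard menu controls. (This reading follows from the context shown here; the rest of the function is outside this hunk.)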
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 1d49a8dd4a37..7c040a3b45be 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -72,7 +72,7 @@ static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
u32 value;
/* compute the number of MC clock cycles per tick */
- tick = mc->tick * clk_get_rate(mc->clk);
+ tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
do_div(tick, NSEC_PER_SEC);
value = readl(mc->regs + MC_EMEM_ARB_CFG);
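The product of the tick period and the clock rate can exceed 32 bits; on 32-bit platforms the operands are 32-bit, so without the cast the multiplication wraps before do_div() ever sees it. A minimal userspace demonstration (values are illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t tick = 30;             /* illustrative period */
            uint32_t rate = 408000000;      /* illustrative clock rate */

            uint32_t wrapped = tick * rate;           /* 32-bit multiply wraps */
            uint64_t correct = (uint64_t)tick * rate; /* widen first, as above */

            printf("wrapped: %u\ncorrect: %llu\n",
                   wrapped, (unsigned long long)correct);
            return 0;
    }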
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 1246d69ba187..b1564cacd19e 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -629,13 +629,18 @@ static int __init memstick_init(void)
return -ENOMEM;
rc = bus_register(&memstick_bus_type);
- if (!rc)
- rc = class_register(&memstick_host_class);
+ if (rc)
+ goto error_destroy_workqueue;
- if (!rc)
- return 0;
+ rc = class_register(&memstick_host_class);
+ if (rc)
+ goto error_bus_unregister;
+
+ return 0;
+error_bus_unregister:
bus_unregister(&memstick_bus_type);
+error_destroy_workqueue:
destroy_workqueue(workqueue);
return rc;
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 22dd8c055048..71cecd7aeea0 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -533,6 +533,7 @@ module_init(intel_lpss_init);
static void __exit intel_lpss_exit(void)
{
+ ida_destroy(&intel_lpss_devid_ida);
debugfs_remove(intel_lpss_debugfs);
}
module_exit(intel_lpss_exit);
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 340b44d9e8cf..ae2be9194a62 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/numa.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
@@ -61,7 +62,7 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME "xpc_notify"
-static int xpc_mq_node = -1;
+static int xpc_mq_node = NUMA_NO_NODE;
static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 115ff419992a..84ff70b91fed 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1769,8 +1769,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
if (slot->cd_idx >= 0) {
- ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx,
+ ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
slot->cd_override_level, 0, NULL);
+ if (ret && ret != -EPROBE_DEFER)
+ ret = mmc_gpiod_request_cd(host->mmc, NULL,
+ slot->cd_idx,
+ slot->cd_override_level,
+ 0, NULL);
if (ret == -EPROBE_DEFER)
goto remove;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index d41f3c35191d..b33328c10976 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1070,13 +1070,6 @@ static int bond_option_arp_validate_set(struct bonding *bond,
{
netdev_info(bond->dev, "Setting arp_validate to %s (%llu)\n",
newval->string, newval->value);
-
- if (bond->dev->flags & IFF_UP) {
- if (!newval->value)
- bond->recv_probe = NULL;
- else if (bond->params.arp_interval)
- bond->recv_probe = bond_arp_rcv;
- }
bond->params.arp_validate = newval->value;
return 0;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index aa346b93c4a8..bbc1d3c425c7 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1648,7 +1648,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
qm_sg_entry_get_len(&sgt[0]), dma_dir);
/* remaining pages were mapped with skb_frag_dma_map() */
- for (i = 1; i < nr_frags; i++) {
+ for (i = 1; i <= nr_frags; i++) {
WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
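As the comment above the loop says, the page fragments were mapped separately from the first entry, which suggests the layout sgt[0] for the linear part and sgt[1]..sgt[nr_frags] for the nr_frags fragments. With that layout the old bound `i < nr_frags` stopped one entry short and leaked the DMA mapping of the last fragment; `i <= nr_frags` covers the full range.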
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 83d041f4fc0f..b3b91375cce6 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3518,7 +3518,7 @@ failed_init:
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_reset:
- pm_runtime_put(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
failed_regulator:
failed_clk_ipg:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 22bcf4197093..7ec24769c7bb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -52,6 +52,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
+#include <linux/numa.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
@@ -6442,7 +6443,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
int orig_node = dev_to_node(dev);
- int ring_node = -1;
+ int ring_node = NUMA_NO_NODE;
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -6536,7 +6537,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
{
struct device *dev = rx_ring->dev;
int orig_node = dev_to_node(dev);
- int ring_node = -1;
+ int ring_node = NUMA_NO_NODE;
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 8ea9362c3894..016ce7e4709f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4477,7 +4477,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register\n");
- goto err_free_stats;
+ goto err_netdev;
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from,
@@ -4488,13 +4488,11 @@ static int mvneta_probe(struct platform_device *pdev)
return 0;
err_netdev:
- unregister_netdev(dev);
if (pp->bm_priv) {
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
1 << pp->id);
}
-err_free_stats:
free_percpu(pp->stats);
err_free_ports:
free_percpu(pp->ports);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 2f21a63fa48e..ade18a88f8ca 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1432,7 +1432,7 @@ static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
- int tx_port_num, val, queue, ptxq, lrxq;
+ int tx_port_num, val, queue, lrxq;
if (port->priv->hw_version == MVPP21) {
/* Update TX FIFO MIN Threshold */
@@ -1453,11 +1453,9 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
/* Close bandwidth for all queues */
- for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
- ptxq = mvpp2_txq_phys(port->id, queue);
+ for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
mvpp2_write(port->priv,
- MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
- }
+ MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
/* Set refill period to 1 usec, refill tokens
* and bucket size to maximum
@@ -2313,7 +2311,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
txq->descs_dma = 0;
/* Set minimum bandwidth for disabled TXQs */
- mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
+ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
/* Set Tx descriptors queue starting address and size */
thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index ffed2d4c9403..9c481823b3e8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1492,7 +1492,7 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
rule.port = port;
rule.qpn = qpn;
INIT_LIST_HEAD(&rule.list);
- mlx4_err(dev, "going promisc on %x\n", port);
+ mlx4_info(dev, "going promisc on %x\n", port);
return mlx4_flow_attach(dev, &rule, regid_p);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 1707ff70eca8..13080def90cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2249,7 +2249,7 @@ static struct mlx5_flow_root_namespace
cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
/* Create the root namespace */
- root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
+ root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
if (!root_ns)
return NULL;
@@ -2392,6 +2392,7 @@ static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->esw_egress_root_ns[i]);
kfree(steering->esw_egress_root_ns);
+ steering->esw_egress_root_ns = NULL;
}
static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
@@ -2406,6 +2407,7 @@ static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->esw_ingress_root_ns[i]);
kfree(steering->esw_ingress_root_ns);
+ steering->esw_ingress_root_ns = NULL;
}
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
@@ -2574,6 +2576,7 @@ cleanup_root_ns:
for (i--; i >= 0; i--)
cleanup_root_ns(steering->esw_egress_root_ns[i]);
kfree(steering->esw_egress_root_ns);
+ steering->esw_egress_root_ns = NULL;
return err;
}
@@ -2601,6 +2604,7 @@ cleanup_root_ns:
for (i--; i >= 0; i--)
cleanup_root_ns(steering->esw_ingress_root_ns[i]);
kfree(steering->esw_ingress_root_ns);
+ steering->esw_ingress_root_ns = NULL;
return err;
}
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 70347720fdf9..4393d32b58c1 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -734,6 +734,7 @@ static int sgiseeq_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
sp = netdev_priv(dev);
/* Make private data page aligned */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ff2eeddf588e..4c4298f1ebee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -157,7 +157,8 @@ int stmmac_mdio_reset(struct mii_bus *bus)
of_property_read_u32_array(np,
"snps,reset-delays-us", data->delays, 3);
- if (gpio_request(data->reset_gpio, "mdio-reset"))
+ if (devm_gpio_request(priv->device, data->reset_gpio,
+ "mdio-reset"))
return 0;
}
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index b5edc7f96a39..685e875f5164 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -610,12 +610,20 @@ static struct compressor ppp_deflate_draft = {
static int __init deflate_init(void)
{
- int answer = ppp_register_compressor(&ppp_deflate);
- if (answer == 0)
- printk(KERN_INFO
- "PPP Deflate Compression module registered\n");
- ppp_register_compressor(&ppp_deflate_draft);
- return answer;
+ int rc;
+
+ rc = ppp_register_compressor(&ppp_deflate);
+ if (rc)
+ return rc;
+
+ rc = ppp_register_compressor(&ppp_deflate_draft);
+ if (rc) {
+ ppp_unregister_compressor(&ppp_deflate);
+ return rc;
+ }
+
+ pr_info("PPP Deflate Compression module registered\n");
+ return 0;
}
static void __exit deflate_cleanup(void)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 9e8098e3cdac..d6d202d0b6b0 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2141,12 +2141,12 @@ static void team_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
dev->hw_features = TEAM_VLAN_FEATURES |
- NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
dev->features |= dev->hw_features;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
}
static int team_newlink(struct net *src_net, struct net_device *dev,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index c4b63d86da05..0f11daa42b86 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -695,6 +695,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->tfiles[tun->numqueues - 1]);
ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;
+ rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
+ NULL);
--tun->numqueues;
if (clean) {
@@ -1077,7 +1079,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
tfile = rcu_dereference(tun->tfiles[txq]);
/* Drop packet if interface is not attached */
- if (txq >= tun->numqueues)
+ if (!tfile)
goto drop;
if (!rcu_dereference(tun->steering_prog))
@@ -1284,6 +1286,7 @@ static int tun_xdp_xmit(struct net_device *dev, int n,
rcu_read_lock();
+resample:
numqueues = READ_ONCE(tun->numqueues);
if (!numqueues) {
rcu_read_unlock();
@@ -1292,6 +1295,8 @@ static int tun_xdp_xmit(struct net_device *dev, int n,
tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
numqueues]);
+ if (unlikely(!tfile))
+ goto resample;
spin_lock(&tfile->tx_ring.producer_lock);
for (i = 0; i < n; i++) {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index edc42088f214..4ca6bddacc48 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1322,6 +1322,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 5d6334c32db5..d784e4a4aa17 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5459,8 +5459,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct cfg80211_chan_def def;
u32 vdev_param, pdev_param, slottime, preamble;
u16 bitrate, hw_value;
- u8 rate;
- int rateidx, ret = 0;
+ u8 rate, rateidx;
+ int ret = 0, mcast_rate;
enum nl80211_band band;
mutex_lock(&ar->conf_mutex);
@@ -5632,7 +5632,11 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_MCAST_RATE &&
!WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) {
band = def.chan->band;
- rateidx = vif->bss_conf.mcast_rate[band] - 1;
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+ if (mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 7f61591ce0de..cb527a21f1ac 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -613,6 +613,10 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
full_len,
last_in_bundle,
last_in_bundle);
+ if (ret) {
+ ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
+ goto err;
+ }
}
ar_sdio->n_rx_pkts = i;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 0fcfb51642ae..ae5172a0a070 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -2692,8 +2692,10 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
- ieee80211_has_protected(hdr->frame_control))
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
buf_len = round_up(buf_len, 4);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 9d7ac1ab2d02..9f01c8934b8c 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1178,6 +1178,10 @@ static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
return -EINVAL;
ev = (struct wmi_pstream_timeout_event *) datap;
+ if (ev->traffic_class >= WMM_NUM_AC) {
+ ath6kl_err("invalid traffic class: %d\n", ev->traffic_class);
+ return -EINVAL;
+ }
/*
* When the pstream (fat pipe == AC) timesout, it means there were
@@ -1519,6 +1523,10 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
return -EINVAL;
reply = (struct wmi_cac_event *) datap;
+ if (reply->ac >= WMM_NUM_AC) {
+ ath6kl_err("invalid AC: %d\n", reply->ac);
+ return -EINVAL;
+ }
if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
(reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
@@ -2635,7 +2643,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
u16 active_tsids = 0;
int ret;
- if (traffic_class > 3) {
+ if (traffic_class >= WMM_NUM_AC) {
ath6kl_err("invalid traffic class: %d\n", traffic_class);
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a8ac42c96d71..35c4a7f263b1 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -815,6 +815,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_hdr *hdr;
bool discard_current = sc->rx.discard_next;
+ bool is_phyerr;
/*
* Discard corrupt descriptors which are marked in
@@ -827,8 +828,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
/*
* Discard zero-length packets and packets smaller than an ACK
+ * which are not PHY_ERROR (short radar pulses have a length of 3)
*/
- if (rx_stats->rs_datalen < 10) {
+ is_phyerr = rx_stats->rs_status & ATH9K_RXERR_PHY;
+ if (!rx_stats->rs_datalen ||
+ (rx_stats->rs_datalen < 10 && !is_phyerr)) {
RX_STAT_INC(rx_len_err);
goto corrupt;
}
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index e7c3f3b8457d..99f1897a775d 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -128,6 +128,8 @@ static const struct usb_device_id carl9170_usb_ids[] = {
};
MODULE_DEVICE_TABLE(usb, carl9170_usb_ids);
+static struct usb_driver carl9170_driver;
+
static void carl9170_usb_submit_data_urb(struct ar9170 *ar)
{
struct urb *urb;
@@ -966,32 +968,28 @@ err_out:
static void carl9170_usb_firmware_failed(struct ar9170 *ar)
{
- struct device *parent = ar->udev->dev.parent;
- struct usb_device *udev;
-
- /*
- * Store a copy of the usb_device pointer locally.
- * This is because device_release_driver initiates
- * carl9170_usb_disconnect, which in turn frees our
- * driver context (ar).
+ /* Store copies of the usb_interface and usb_device pointers locally.
+ * This is because release_driver initiates carl9170_usb_disconnect,
+ * which in turn frees our driver context (ar).
*/
- udev = ar->udev;
+ struct usb_interface *intf = ar->intf;
+ struct usb_device *udev = ar->udev;
complete(&ar->fw_load_wait);
+ /* At this point 'ar' could already be freed. Don't use it anymore */
+ ar = NULL;
/* unbind anything failed */
- if (parent)
- device_lock(parent);
-
- device_release_driver(&udev->dev);
- if (parent)
- device_unlock(parent);
+ usb_lock_device(udev);
+ usb_driver_release_interface(&carl9170_driver, intf);
+ usb_unlock_device(udev);
- usb_put_dev(udev);
+ usb_put_intf(intf);
}
static void carl9170_usb_firmware_finish(struct ar9170 *ar)
{
+ struct usb_interface *intf = ar->intf;
int err;
err = carl9170_parse_firmware(ar);
@@ -1009,7 +1007,7 @@ static void carl9170_usb_firmware_finish(struct ar9170 *ar)
goto err_unrx;
complete(&ar->fw_load_wait);
- usb_put_dev(ar->udev);
+ usb_put_intf(intf);
return;
err_unrx:
@@ -1052,7 +1050,6 @@ static int carl9170_usb_probe(struct usb_interface *intf,
return PTR_ERR(ar);
udev = interface_to_usbdev(intf);
- usb_get_dev(udev);
ar->udev = udev;
ar->intf = intf;
ar->features = id->driver_info;
@@ -1094,15 +1091,14 @@ static int carl9170_usb_probe(struct usb_interface *intf,
atomic_set(&ar->rx_anch_urbs, 0);
atomic_set(&ar->rx_pool_urbs, 0);
- usb_get_dev(ar->udev);
+ usb_get_intf(intf);
carl9170_set_state(ar, CARL9170_STOPPED);
err = request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
&ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
if (err) {
- usb_put_dev(udev);
- usb_put_dev(udev);
+ usb_put_intf(intf);
carl9170_free(ar);
}
return err;
@@ -1131,7 +1127,6 @@ static void carl9170_usb_disconnect(struct usb_interface *intf)
carl9170_release_firmware(ar);
carl9170_free(ar);
- usb_put_dev(udev);
}
#ifdef CONFIG_PM
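Both this driver and p54usb (further down) switch from device_release_driver() on the whole usb_device, which required juggling the parent's device lock, to usb_driver_release_interface() under usb_lock_device(); that unbinds only this driver's own interface binding. The matching usb_get_intf()/usb_put_intf() pair replaces the usb_get_dev()/usb_put_dev() reference counting removed above.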
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 75c8aa297107..1b1b58e0129a 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -736,6 +736,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
[GRO_HELD] = "GRO_HELD",
[GRO_NORMAL] = "GRO_NORMAL",
[GRO_DROP] = "GRO_DROP",
+ [GRO_CONSUMED] = "GRO_CONSUMED",
};
wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 8ba08ae3e506..972f9ff0d74c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1547,7 +1547,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
goto free;
out_free_fw:
- iwl_dealloc_ucode(drv);
release_firmware(ucode_raw);
out_unbind:
complete(&drv->request_firmware_complete);
diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c
index 043bd1c23c19..4a197a32d78c 100644
--- a/drivers/net/wireless/intersil/p54/p54usb.c
+++ b/drivers/net/wireless/intersil/p54/p54usb.c
@@ -33,6 +33,8 @@ MODULE_ALIAS("prism54usb");
MODULE_FIRMWARE("isl3886usb");
MODULE_FIRMWARE("isl3887usb");
+static struct usb_driver p54u_driver;
+
/*
* Note:
*
@@ -921,9 +923,9 @@ static void p54u_load_firmware_cb(const struct firmware *firmware,
{
struct p54u_priv *priv = context;
struct usb_device *udev = priv->udev;
+ struct usb_interface *intf = priv->intf;
int err;
- complete(&priv->fw_wait_load);
if (firmware) {
priv->fw = firmware;
err = p54u_start_ops(priv);
@@ -932,26 +934,22 @@ static void p54u_load_firmware_cb(const struct firmware *firmware,
dev_err(&udev->dev, "Firmware not found.\n");
}
- if (err) {
- struct device *parent = priv->udev->dev.parent;
-
- dev_err(&udev->dev, "failed to initialize device (%d)\n", err);
-
- if (parent)
- device_lock(parent);
+ complete(&priv->fw_wait_load);
+ /*
+ * At this point p54u_disconnect may have already freed
+ * the "priv" context. Do not use it anymore!
+ */
+ priv = NULL;
- device_release_driver(&udev->dev);
- /*
- * At this point p54u_disconnect has already freed
- * the "priv" context. Do not use it anymore!
- */
- priv = NULL;
+ if (err) {
+ dev_err(&intf->dev, "failed to initialize device (%d)\n", err);
- if (parent)
- device_unlock(parent);
+ usb_lock_device(udev);
+ usb_driver_release_interface(&p54u_driver, intf);
+ usb_unlock_device(udev);
}
- usb_put_dev(udev);
+ usb_put_intf(intf);
}
static int p54u_load_firmware(struct ieee80211_hw *dev,
@@ -972,14 +970,14 @@ static int p54u_load_firmware(struct ieee80211_hw *dev,
dev_info(&priv->udev->dev, "Loading firmware file %s\n",
p54u_fwlist[i].fw);
- usb_get_dev(udev);
+ usb_get_intf(intf);
err = request_firmware_nowait(THIS_MODULE, 1, p54u_fwlist[i].fw,
device, GFP_KERNEL, priv,
p54u_load_firmware_cb);
if (err) {
dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
"(%d)!\n", p54u_fwlist[i].fw, err);
- usb_put_dev(udev);
+ usb_put_intf(intf);
}
return err;
@@ -1011,8 +1009,6 @@ static int p54u_probe(struct usb_interface *intf,
skb_queue_head_init(&priv->rx_queue);
init_usb_anchor(&priv->submitted);
- usb_get_dev(udev);
-
/* really lazy and simple way of figuring out if we're a 3887 */
/* TODO: should just stick the identification in the device table */
i = intf->altsetting->desc.bNumEndpoints;
@@ -1053,10 +1049,8 @@ static int p54u_probe(struct usb_interface *intf,
priv->upload_fw = p54u_upload_firmware_net2280;
}
err = p54u_load_firmware(dev, intf);
- if (err) {
- usb_put_dev(udev);
+ if (err)
p54_free_common(dev);
- }
return err;
}
@@ -1072,7 +1066,6 @@ static void p54u_disconnect(struct usb_interface *intf)
wait_for_completion(&priv->fw_wait_load);
p54_unregister_common(dev);
- usb_put_dev(interface_to_usbdev(intf));
release_firmware(priv->fw);
p54_free_common(dev);
}
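
The p54usb rework follows the same interface-refcount pattern and also reorders the firmware callback: complete(&priv->fw_wait_load) now fires before the error path runs, and the driver context must not be touched afterwards because a concurrent p54u_disconnect() may free it. A sketch of that lifetime rule, assuming the field names from the patch (the -ENOENT value for the missing-firmware case is an assumption):

	static void p54u_fw_cb_sketch(const struct firmware *fw, void *context)
	{
		struct p54u_priv *priv = context;
		struct usb_device *udev = priv->udev;		/* copy out    */
		struct usb_interface *intf = priv->intf;	/* before      */
		int err;					/* completing  */

		if (fw) {
			priv->fw = fw;
			err = p54u_start_ops(priv);
		} else {
			err = -ENOENT;	/* assumption: "Firmware not found" */
		}

		complete(&priv->fw_wait_load);
		priv = NULL;	/* disconnect may free 'priv' from here on */

		if (err) {
			/* only the local copies may be used from here */
			usb_lock_device(udev);
			usb_driver_release_interface(&p54u_driver, intf);
			usb_unlock_device(udev);
		}
		usb_put_intf(intf);	/* pairs with usb_get_intf() at load */
	}
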
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index b73f99dc5a72..cec48ff88446 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -1759,9 +1759,16 @@ struct mwifiex_ie_types_wmm_queue_status {
struct ieee_types_vendor_header {
u8 element_id;
u8 len;
+#ifdef __GENKSYMS__
u8 oui[4]; /* 0~2: oui, 3: oui_type */
u8 oui_subtype;
u8 version;
+#else
+ struct {
+ u8 oui[3];
+ u8 oui_type;
+ } __packed oui;
+#endif
} __packed;
struct ieee_types_wmm_parameter {
@@ -1775,6 +1782,11 @@ struct ieee_types_wmm_parameter {
* Version [1]
*/
struct ieee_types_vendor_header vend_hdr;
+#ifndef __GENKSYMS__
+ u8 oui_subtype;
+ u8 version;
+#endif
+
u8 qos_info_bitmap;
u8 reserved;
struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
@@ -1792,6 +1804,10 @@ struct ieee_types_wmm_info {
* Version [1]
*/
struct ieee_types_vendor_header vend_hdr;
+#ifndef __GENKSYMS__
+ u8 oui_subtype;
+ u8 version;
+#endif
u8 qos_info_bitmap;
} __packed;
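
The fw.h hunks rely on __GENKSYMS__, which is defined only while the genksyms tool computes symbol CRCs: the old field layout is kept for the CRC pass so the kABI checksum is unchanged, while the compiler sees the new layout. The split must be binary-identical, which is why the two bytes dropped from the header are re-added in every containing struct behind #ifndef __GENKSYMS__. A minimal sketch of the idiom, with hypothetical struct names:

	struct hdr_sketch {
		u8 element_id;
		u8 len;
	#ifdef __GENKSYMS__
		u8 oui[4];		/* old view, seen only by genksyms */
		u8 oui_subtype;
		u8 version;
	#else
		struct {
			u8 oui[3];	/* new view: same first 4 bytes */
			u8 oui_type;
		} __packed oui;
	#endif
	} __packed;

	struct container_sketch {
		struct hdr_sketch vend_hdr;
	#ifndef __GENKSYMS__
		u8 oui_subtype;		/* re-added here: offsets unchanged */
		u8 version;
	#endif
		u8 qos_info_bitmap;
	} __packed;
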
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 80136153a8e8..6dd771ce68a3 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1361,21 +1361,25 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_VENDOR_SPECIFIC:
- if (element_len + 2 < sizeof(vendor_ie->vend_hdr))
- return -EINVAL;
-
vendor_ie = (struct ieee_types_vendor_specific *)
current_ptr;
- if (!memcmp
- (vendor_ie->vend_hdr.oui, wpa_oui,
- sizeof(wpa_oui))) {
+ /* 802.11 requires at least 3-byte OUI. */
+ if (element_len < sizeof(vendor_ie->vend_hdr.oui.oui))
+ return -EINVAL;
+
+ /* Not long enough for a match? Skip it. */
+ if (element_len < sizeof(wpa_oui))
+ break;
+
+ if (!memcmp(&vendor_ie->vend_hdr.oui, wpa_oui,
+ sizeof(wpa_oui))) {
bss_entry->bcn_wpa_ie =
(struct ieee_types_vendor_specific *)
current_ptr;
bss_entry->wpa_offset = (u16)
(current_ptr - bss_entry->beacon_buf);
- } else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui,
+ } else if (!memcmp(&vendor_ie->vend_hdr.oui, wmm_oui,
sizeof(wmm_oui))) {
if (total_ie_len ==
sizeof(struct ieee_types_wmm_parameter) ||
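
The scan.c hunk above orders the vendor-IE validation in two steps: elements shorter than the 3-byte OUI that 802.11 mandates are rejected as malformed, while elements merely too short to match the 4-byte OUI-plus-type patterns are skipped rather than failed. A standalone sketch of that order (wpa_oui is the real WPA OUI 00:50:f2 with type 1; the function name is hypothetical):

	/* body points at the IE payload, element_len is its length byte */
	static int vendor_ie_check_sketch(const u8 *body, u8 element_len)
	{
		static const u8 wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 };

		if (element_len < 3)
			return -EINVAL;	/* no room for the mandatory OUI */

		if (element_len < sizeof(wpa_oui))
			return 0;	/* too short to match: skip, no error */

		return !memcmp(body, wpa_oui, sizeof(wpa_oui)); /* 1 = WPA IE */
	}
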
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index b454b5f85503..843d65bba181 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -1348,7 +1348,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
/* Test to see if it is a WPA IE, if not, then
* it is a gen IE
*/
- if (!memcmp(pvendor_ie->oui, wpa_oui,
+ if (!memcmp(&pvendor_ie->oui, wpa_oui,
sizeof(wpa_oui))) {
/* IE is a WPA/WPA2 IE so call set_wpa function
*/
@@ -1358,7 +1358,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
goto next_ie;
}
- if (!memcmp(pvendor_ie->oui, wps_oui,
+ if (!memcmp(&pvendor_ie->oui, wps_oui,
sizeof(wps_oui))) {
/* Test to see if it is a WPS IE,
* if so, enable wps session flag
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 407b9932ca4d..64916ba15df5 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -240,7 +240,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, INFO,
"info: WMM Parameter IE: version=%d,\t"
"qos_info Parameter Set Count=%d, Reserved=%#x\n",
- wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
+ wmm_ie->version, wmm_ie->qos_info_bitmap &
IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
wmm_ie->reserved);
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index a13a50e945a4..36f572ccfe17 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -390,6 +390,14 @@ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
return -1;
for (i = 0; i < num_clients; i++) {
+ if (IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
+ clients[i]->dma_ops == &dma_virt_ops) {
+ if (verbose)
+ dev_warn(clients[i],
+ "cannot be used for peer-to-peer DMA because the driver makes use of dma_virt_ops\n");
+ return -1;
+ }
+
pci_client = find_parent_pci_dev(clients[i]);
if (!pci_client) {
if (verbose)
@@ -654,7 +662,7 @@ int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
* p2pdma mappings are not compatible with devices that use
* dma_virt_ops. If the upper layers do the right thing
* this should never happen because it will be prevented
- * by the check in pci_p2pdma_add_client()
+ * by the check in pci_p2pdma_distance_many()
*/
if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
dev->dma_ops == &dma_virt_ops))
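
The p2pdma hunk moves the dma_virt_ops rejection from mapping time to pci_p2pdma_distance_many(): dma_virt_ops treats DMA addresses as kernel virtual addresses, so a peer-to-peer bus address handed to such a client would be dereferenced as a pointer. A sketch of the per-client check, assuming the same symbols as the patch (the helper name is hypothetical):

	static bool client_can_p2pdma_sketch(struct device *dev, bool verbose)
	{
		if (IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
		    dev->dma_ops == &dma_virt_ops) {
			if (verbose)
				dev_warn(dev, "driver uses dma_virt_ops, cannot do peer-to-peer DMA\n");
			return false;	/* reject before any mapping exists */
		}
		return true;
	}
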
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index c89ca971f7d6..0058a7c7d4ef 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -399,7 +399,8 @@ void __weak pcibios_free_irq(struct pci_dev *dev)
#ifdef CONFIG_PCI_IOV
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
- return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe);
+ return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
+ pdev->driver_override);
}
#else
static inline bool pci_device_can_probe(struct pci_dev *pdev)
@@ -414,6 +415,9 @@ static int pci_device_probe(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = to_pci_driver(dev->driver);
+ if (!pci_device_can_probe(pci_dev))
+ return -ENODEV;
+
pci_assign_irq(pci_dev);
error = pcibios_alloc_irq(pci_dev);
@@ -421,12 +425,10 @@ static int pci_device_probe(struct device *dev)
return error;
pci_dev_get(pci_dev);
- if (pci_device_can_probe(pci_dev)) {
- error = __pci_device_probe(drv, pci_dev);
- if (error) {
- pcibios_free_irq(pci_dev);
- pci_dev_put(pci_dev);
- }
+ error = __pci_device_probe(drv, pci_dev);
+ if (error) {
+ pcibios_free_irq(pci_dev);
+ pci_dev_put(pci_dev);
}
return error;
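
The pci-driver.c change does two things: pci_device_can_probe() now also admits a virtual function whose driver_override is set, and the check is hoisted to the top of pci_device_probe(), so a gated VF returns -ENODEV before any IRQ is assigned and nothing has to be undone. A sketch of the resulting control flow (function name hypothetical):

	static int pci_probe_sketch(struct pci_driver *drv, struct pci_dev *pci_dev)
	{
		int error;

		if (!pci_device_can_probe(pci_dev))
			return -ENODEV;		/* nothing allocated yet */

		pci_assign_irq(pci_dev);
		error = pcibios_alloc_irq(pci_dev);
		if (error < 0)
			return error;

		pci_dev_get(pci_dev);
		error = __pci_device_probe(drv, pci_dev);
		if (error) {			/* unwind in reverse order */
			pcibios_free_irq(pci_dev);
			pci_dev_put(pci_dev);
		}
		return error;
	}
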
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index e331aecbe22d..3c9fdbb460c7 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -1207,6 +1207,8 @@ config HUAWEI_WMI
To compile this driver as a module, choose M here: the module
will be called huawei-wmi.
+source "drivers/platform/x86/intel_speed_select_if/Kconfig"
+
endif # X86_PLATFORM_DEVICES
config PMC_ATOM
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 73ba83a0bdf0..2fbbf016a5f0 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -88,3 +88,4 @@ obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/
diff --git a/drivers/platform/x86/intel_speed_select_if/Kconfig b/drivers/platform/x86/intel_speed_select_if/Kconfig
new file mode 100644
index 000000000000..ce3e3dc076d2
--- /dev/null
+++ b/drivers/platform/x86/intel_speed_select_if/Kconfig
@@ -0,0 +1,17 @@
+menu "Intel Speed Select Technology interface support"
+ depends on PCI
+ depends on X86_64 || COMPILE_TEST
+
+config INTEL_SPEED_SELECT_INTERFACE
+ tristate "Intel(R) Speed Select Technology interface drivers"
+ help
+	  This config enables the Intel(R) Speed Select Technology interface
+	  drivers. The Intel(R) Speed Select Technology features are
+	  non-architectural and only supported on specific Xeon(R) servers.
+	  These drivers provide an interface to communicate directly with the
+	  hardware via MMIO and mailboxes to enumerate and control all the
+	  speed select features.
+
+	  Enable this config if you need to enable and control the Intel(R)
+	  Speed Select Technology features from user space.
+endmenu
diff --git a/drivers/platform/x86/intel_speed_select_if/Makefile b/drivers/platform/x86/intel_speed_select_if/Makefile
new file mode 100644
index 000000000000..856076206f35
--- /dev/null
+++ b/drivers/platform/x86/intel_speed_select_if/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile - Intel Speed Select Interface drivers
+# Copyright (c) 2019, Intel Corporation.
+#
+
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_common.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mmio.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mbox_pci.o
+obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += isst_if_mbox_msr.o
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
new file mode 100644
index 000000000000..68d75391db57
--- /dev/null
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: Common functions
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/cpuhotplug.h>
+#include <linux/fs.h>
+#include <linux/hashtable.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+
+#include "isst_if_common.h"
+
+#define MSR_THREAD_ID_INFO 0x53
+#define MSR_CPU_BUS_NUMBER 0x128
+
+static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];
+
+static int punit_msr_white_list[] = {
+ MSR_TURBO_RATIO_LIMIT,
+ MSR_CONFIG_TDP_CONTROL,
+};
+
+struct isst_valid_cmd_ranges {
+ u16 cmd;
+ u16 sub_cmd_beg;
+ u16 sub_cmd_end;
+};
+
+struct isst_cmd_set_req_type {
+ u16 cmd;
+ u16 sub_cmd;
+ u16 param;
+};
+
+static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
+ {0xD0, 0x00, 0x03},
+ {0x7F, 0x00, 0x0B},
+ {0x7F, 0x10, 0x12},
+ {0x7F, 0x20, 0x23},
+};
+
+static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
+ {0xD0, 0x00, 0x08},
+ {0xD0, 0x01, 0x08},
+ {0xD0, 0x02, 0x08},
+ {0xD0, 0x03, 0x08},
+ {0x7F, 0x02, 0x00},
+ {0x7F, 0x08, 0x00},
+};
+
+struct isst_cmd {
+ struct hlist_node hnode;
+ u64 data;
+ u32 cmd;
+ int cpu;
+ int mbox_cmd_type;
+ u32 param;
+};
+
+static DECLARE_HASHTABLE(isst_hash, 8);
+static DEFINE_MUTEX(isst_hash_lock);
+
+static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
+ u32 data)
+{
+ struct isst_cmd *sst_cmd;
+
+ sst_cmd = kmalloc(sizeof(*sst_cmd), GFP_KERNEL);
+ if (!sst_cmd)
+ return -ENOMEM;
+
+ sst_cmd->cpu = cpu;
+ sst_cmd->cmd = cmd;
+ sst_cmd->mbox_cmd_type = mbox_cmd_type;
+ sst_cmd->param = param;
+ sst_cmd->data = data;
+
+ hash_add(isst_hash, &sst_cmd->hnode, sst_cmd->cmd);
+
+ return 0;
+}
+
+static void isst_delete_hash(void)
+{
+ struct isst_cmd *sst_cmd;
+ struct hlist_node *tmp;
+ int i;
+
+ hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
+ hash_del(&sst_cmd->hnode);
+ kfree(sst_cmd);
+ }
+}
+
+/**
+ * isst_store_cmd() - Store command to a hash table
+ * @cmd: Mailbox command.
+ * @sub_cmd: Mailbox sub-command or MSR id.
+ * @cpu: Target logical CPU for the command.
+ * @mbox_cmd_type: Mailbox or MSR command.
+ * @param: Mailbox parameter.
+ * @data: Mailbox request data or MSR data.
+ *
+ * Stores the command in a hash table if there is no such command already
+ * stored. If already stored, update the latest parameter and data for the
+ * command.
+ *
+ * Return: Result of the store to the hash table: 0 on success, a negative
+ * error code on failure.
+ */
+int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
+ u32 param, u64 data)
+{
+ struct isst_cmd *sst_cmd;
+ int full_cmd, ret;
+
+ full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
+ full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));
+ mutex_lock(&isst_hash_lock);
+ hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
+ if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
+ sst_cmd->mbox_cmd_type == mbox_cmd_type) {
+ sst_cmd->param = param;
+ sst_cmd->data = data;
+ mutex_unlock(&isst_hash_lock);
+ return 0;
+ }
+ }
+
+ ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
+ mutex_unlock(&isst_hash_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(isst_store_cmd);
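
The hash key above packs the 16-bit command into the upper half of a 32-bit value and the 16-bit sub-command (or MSR id) into the lower half; isst_mbox_resume_command() below reverses the split. A small worked example of the packing:

	/* cmd = 0x7F, sub_cmd = 0x02 */
	u32 full_cmd = (0x7F & 0xffff) << 16 | (0x02 & 0xffff); /* 0x007F0002 */
	u8 command = (full_cmd >> 16) & 0xff;			/* 0x7F   */
	u16 sub_command = full_cmd & 0xffff;			/* 0x0002 */
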
+
+static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
+ struct isst_cmd *sst_cmd)
+{
+ struct isst_if_mbox_cmd mbox_cmd;
+ int wr_only;
+
+ mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
+ mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
+ mbox_cmd.parameter = sst_cmd->param;
+ mbox_cmd.req_data = sst_cmd->data;
+ mbox_cmd.logical_cpu = sst_cmd->cpu;
+ (cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
+}
+
+/**
+ * isst_resume_common() - Process Resume request
+ *
 + * On resume, replay all stored mailbox commands and MSR writes.
+ *
+ * Return: None.
+ */
+void isst_resume_common(void)
+{
+ struct isst_cmd *sst_cmd;
+ int i;
+
+ hash_for_each(isst_hash, i, sst_cmd, hnode) {
+ struct isst_if_cmd_cb *cb;
+
+ if (sst_cmd->mbox_cmd_type) {
+ cb = &punit_callbacks[ISST_IF_DEV_MBOX];
+ if (cb->registered)
+ isst_mbox_resume_command(cb, sst_cmd);
+ } else {
+ wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
+ sst_cmd->data);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(isst_resume_common);
+
+static void isst_restore_msr_local(int cpu)
+{
+ struct isst_cmd *sst_cmd;
+ int i;
+
+ mutex_lock(&isst_hash_lock);
+ for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
+ if (!punit_msr_white_list[i])
+ break;
+
+ hash_for_each_possible(isst_hash, sst_cmd, hnode,
+ punit_msr_white_list[i]) {
+ if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
+ wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
+ }
+ }
+ mutex_unlock(&isst_hash_lock);
+}
+
+/**
+ * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
+ * @cmd: Pointer to the command structure to verify.
+ *
+ * An invalid command to the PUNIT may result in platform instability.
+ * This function checks the command against a whitelist of allowed commands.
+ *
+ * Return: true if the command is invalid, else false.
+ */
+bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
+{
+ int i;
+
+ if (cmd->logical_cpu >= nr_cpu_ids)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
+ if (cmd->command == isst_valid_cmds[i].cmd &&
+ (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
+ cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);
+
+/**
+ * isst_if_mbox_cmd_set_req() - Check whether a mailbox command is a set request
+ * @cmd: Pointer to the command structure to verify.
+ *
+ * Check if the given mailbox command is a set request rather than a get request.
+ *
+ * Return: true if the command is a set request, else false.
+ */
+bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
+ if (cmd->command == isst_cmd_set_reqs[i].cmd &&
+ cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
+ cmd->parameter == isst_cmd_set_reqs[i].param) {
+ return true;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);
+
+static int isst_if_get_platform_info(void __user *argp)
+{
+ struct isst_if_platform_info info;
+
+	info.api_version = ISST_IF_API_VERSION;
+	info.driver_version = ISST_IF_DRIVER_VERSION;
+	info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
+ info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
+ info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;
+
+ if (copy_to_user(argp, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+struct isst_if_cpu_info {
+	/* For BUS 0 and BUS 1 only, which we need for the PUNIT interface */
+ int bus_info[2];
+ int punit_cpu_id;
+};
+
+static struct isst_if_cpu_info *isst_cpu_info;
+
+/**
+ * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
+ * @cpu: Logical CPU number.
+ * @bus_no: The bus number assigned by the hardware.
+ * @dev: The device number assigned by the hardware.
+ * @fn: The function number assigned by the hardware.
+ *
+ * Using cached bus information, find out the PCI device for a bus number,
+ * device and function.
+ *
+ * Return: Return pci_dev pointer or NULL.
+ */
+struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
+{
+ int bus_number;
+
+ if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
+ cpu >= num_possible_cpus())
+ return NULL;
+
+ bus_number = isst_cpu_info[cpu].bus_info[bus_no];
+ if (bus_number < 0)
+ return NULL;
+
+ return pci_get_domain_bus_and_slot(0, bus_number, PCI_DEVFN(dev, fn));
+}
+EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
+
+static int isst_if_cpu_online(unsigned int cpu)
+{
+ u64 data;
+ int ret;
+
+ ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
+ if (ret) {
+		/* This is not a fatal error on an MSR-mailbox-only interface */
+ isst_cpu_info[cpu].bus_info[0] = -1;
+ isst_cpu_info[cpu].bus_info[1] = -1;
+ } else {
+ isst_cpu_info[cpu].bus_info[0] = data & 0xff;
+ isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
+ }
+
+ ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
+ if (ret) {
+ isst_cpu_info[cpu].punit_cpu_id = -1;
+ return ret;
+ }
+ isst_cpu_info[cpu].punit_cpu_id = data;
+
+ isst_restore_msr_local(cpu);
+
+ return 0;
+}
+
+static int isst_if_online_id;
+
+static int isst_if_cpu_info_init(void)
+{
+ int ret;
+
+ isst_cpu_info = kcalloc(num_possible_cpus(),
+ sizeof(*isst_cpu_info),
+ GFP_KERNEL);
+ if (!isst_cpu_info)
+ return -ENOMEM;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "platform/x86/isst-if:online",
+ isst_if_cpu_online, NULL);
+ if (ret < 0) {
+ kfree(isst_cpu_info);
+ return ret;
+ }
+
+ isst_if_online_id = ret;
+
+ return 0;
+}
+
+static void isst_if_cpu_info_exit(void)
+{
+ cpuhp_remove_state(isst_if_online_id);
+ kfree(isst_cpu_info);
+};
+
+static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct isst_if_cpu_map *cpu_map;
+
+ cpu_map = (struct isst_if_cpu_map *)cmd_ptr;
+ if (cpu_map->logical_cpu >= nr_cpu_ids ||
+ cpu_map->logical_cpu >= num_possible_cpus())
+ return -EINVAL;
+
+ *write_only = 0;
+ cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id;
+
+ return 0;
+}
+
+static bool match_punit_msr_white_list(int msr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
+ if (punit_msr_white_list[i] == msr)
+ return true;
+ }
+
+ return false;
+}
+
+static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct isst_if_msr_cmd *msr_cmd;
+ int ret;
+
+ msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;
+
+ if (!match_punit_msr_white_list(msr_cmd->msr))
+ return -EINVAL;
+
+ if (msr_cmd->logical_cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (msr_cmd->read_write) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
+ msr_cmd->msr,
+ msr_cmd->data);
+ *write_only = 1;
+ if (!ret && !resume)
+ ret = isst_store_cmd(0, msr_cmd->msr,
+ msr_cmd->logical_cpu,
+ 0, 0, msr_cmd->data);
+ } else {
+ u64 data;
+
+ ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
+ msr_cmd->msr, &data);
+ if (!ret) {
+ msr_cmd->data = data;
+ *write_only = 0;
+ }
+ }
+
+ return ret;
+}
+
+static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
+{
+ unsigned char __user *ptr;
+ u32 cmd_count;
+ u8 *cmd_ptr;
+ long ret;
+ int i;
+
+ /* Each multi command has u32 command count as the first field */
+ if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
+ return -EFAULT;
+
+ if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
+ return -EINVAL;
+
+ cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
+ if (!cmd_ptr)
+ return -ENOMEM;
+
+ /* cb->offset points to start of the command after the command count */
+ ptr = argp + cb->offset;
+
+ for (i = 0; i < cmd_count; ++i) {
+ int wr_only;
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
+ if (ret)
+ break;
+
+ if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ptr += cb->cmd_size;
+ }
+
+ kfree(cmd_ptr);
+
+ return i ? i : ret;
+}
+
+static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct isst_if_cmd_cb cmd_cb;
+ struct isst_if_cmd_cb *cb;
+ long ret = -ENOTTY;
+
+ switch (cmd) {
+ case ISST_IF_GET_PLATFORM_INFO:
+ ret = isst_if_get_platform_info(argp);
+ break;
+ case ISST_IF_GET_PHY_ID:
+ cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
+ cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
+ cmd_cb.cmd_callback = isst_if_proc_phyid_req;
+ ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
+ break;
+ case ISST_IF_IO_CMD:
+ cb = &punit_callbacks[ISST_IF_DEV_MMIO];
+ if (cb->registered)
+ ret = isst_if_exec_multi_cmd(argp, cb);
+ break;
+ case ISST_IF_MBOX_COMMAND:
+ cb = &punit_callbacks[ISST_IF_DEV_MBOX];
+ if (cb->registered)
+ ret = isst_if_exec_multi_cmd(argp, cb);
+ break;
+ case ISST_IF_MSR_COMMAND:
+ cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
+ cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
+ cmd_cb.cmd_callback = isst_if_msr_cmd_req;
+ ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static DEFINE_MUTEX(punit_misc_dev_lock);
+static int misc_usage_count;
+static int misc_device_ret;
+static int misc_device_open;
+
+static int isst_if_open(struct inode *inode, struct file *file)
+{
+ int i, ret = 0;
+
+	/* Fail the open if a module is going away */
+ mutex_lock(&punit_misc_dev_lock);
+ for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
+ struct isst_if_cmd_cb *cb = &punit_callbacks[i];
+
+ if (cb->registered && !try_module_get(cb->owner)) {
+ ret = -ENODEV;
+ break;
+ }
+ }
+ if (ret) {
+ int j;
+
+ for (j = 0; j < i; ++j) {
+ struct isst_if_cmd_cb *cb;
+
+ cb = &punit_callbacks[j];
+ if (cb->registered)
+ module_put(cb->owner);
+ }
+ } else {
+ misc_device_open++;
+ }
+ mutex_unlock(&punit_misc_dev_lock);
+
+ return ret;
+}
+
+static int isst_if_release(struct inode *inode, struct file *f)
+{
+ int i;
+
+ mutex_lock(&punit_misc_dev_lock);
+ misc_device_open--;
+ for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
+ struct isst_if_cmd_cb *cb = &punit_callbacks[i];
+
+ if (cb->registered)
+ module_put(cb->owner);
+ }
+ mutex_unlock(&punit_misc_dev_lock);
+
+ return 0;
+}
+
+static const struct file_operations isst_if_char_driver_ops = {
+ .open = isst_if_open,
+ .unlocked_ioctl = isst_if_def_ioctl,
+	.release = isst_if_release,
+};
+
+static struct miscdevice isst_if_char_driver = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "isst_interface",
+ .fops = &isst_if_char_driver_ops,
+};
+
+/**
+ * isst_if_cdev_register() - Register callback for IOCTL
+ * @device_type: The device type this callback handles.
+ * @cb: Callback structure.
+ *
+ * This function registers a callback for a device type. On the very first
+ * call it will register a misc device, which is used for the user/kernel
+ * interface. Subsequent calls simply increment a reference count.
+ * Registration will fail if the user has already opened the misc device.
+ * Also, if the misc device creation failed, it will not be retried and all
+ * callers will get the failure code.
+ *
+ * Return: The return value from the misc device creation, or -EINVAL for
+ * an unsupported device type.
+ */
+int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
+{
+ if (misc_device_ret)
+ return misc_device_ret;
+
+ if (device_type >= ISST_IF_DEV_MAX)
+ return -EINVAL;
+
+ mutex_lock(&punit_misc_dev_lock);
+ if (misc_device_open) {
+ mutex_unlock(&punit_misc_dev_lock);
+ return -EAGAIN;
+ }
+ if (!misc_usage_count) {
+ int ret;
+
+ misc_device_ret = misc_register(&isst_if_char_driver);
+ if (misc_device_ret)
+ goto unlock_exit;
+
+ ret = isst_if_cpu_info_init();
+ if (ret) {
+ misc_deregister(&isst_if_char_driver);
+ misc_device_ret = ret;
+ goto unlock_exit;
+ }
+ }
+ memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
+ punit_callbacks[device_type].registered = 1;
+ misc_usage_count++;
+unlock_exit:
+ mutex_unlock(&punit_misc_dev_lock);
+
+ return misc_device_ret;
+}
+EXPORT_SYMBOL_GPL(isst_if_cdev_register);
+
+/**
+ * isst_if_cdev_unregister() - Unregister callback for IOCTL
+ * @device_type: The device type to unregister.
+ *
+ * This function unregisters the previously registered callback. If this
+ * is the last callback being unregistered, the misc device is removed.
+ *
+ * Return: None.
+ */
+void isst_if_cdev_unregister(int device_type)
+{
+ mutex_lock(&punit_misc_dev_lock);
+ misc_usage_count--;
+ punit_callbacks[device_type].registered = 0;
+ if (device_type == ISST_IF_DEV_MBOX)
+ isst_delete_hash();
+ if (!misc_usage_count && !misc_device_ret) {
+ misc_deregister(&isst_if_char_driver);
+ isst_if_cpu_info_exit();
+ }
+ mutex_unlock(&punit_misc_dev_lock);
+}
+EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.h b/drivers/platform/x86/intel_speed_select_if/isst_if_common.h
new file mode 100644
index 000000000000..1409a5bb5582
--- /dev/null
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel Speed Select Interface: Drivers Internal defines
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#ifndef __ISST_IF_COMMON_H
+#define __ISST_IF_COMMON_H
+
+#define INTEL_RAPL_PRIO_DEVID_0 0x3451
+#define INTEL_CFG_MBOX_DEVID_0 0x3459
+
+/*
+ * Maximum number of commands allowed in a single request, used for
+ * validation. This is enough to handle a command to every core in one
+ * ioctl, or all possible message ids to one CPU. The limit also helps
+ * bound the response time per IOCTL request, as the PUNIT may take a
+ * different time to process each request and may stall for long with
+ * too many commands.
+ */
+#define ISST_IF_CMD_LIMIT 64
+
+#define ISST_IF_API_VERSION 0x01
+#define ISST_IF_DRIVER_VERSION 0x01
+
+#define ISST_IF_DEV_MBOX 0
+#define ISST_IF_DEV_MMIO 1
+#define ISST_IF_DEV_MAX 2
+
+/**
+ * struct isst_if_cmd_cb - Used to register a IOCTL handler
+ * @registered:	Used by the common code to store registration state.
+ *			Callers don't need to touch this field
+ * @cmd_size:	The size of an individual command in the IOCTL
+ * @offset:	Offset of the first valid member in the command structure.
+ *		This will be the offset of the start of the command
+ *		after the command count field
+ * @owner:	Owning module, pinned while the misc device is open
+ * @cmd_callback: Callback function to handle the IOCTL. The callback gets
+ *		  the command pointer with the data for the command. There is
+ *		  a pointer called write_only which, when set, suppresses
+ *		  copying the response back to the user ioctl buffer. The
+ *		  "resume" argument can be used to avoid storing the command
+ *		  for replay during system resume
+ *
+ * This structure is used to register a handler for IOCTL. To avoid code
+ * duplication the common code handles all the IOCTL command reads/writes,
+ * including handling multiple commands in a single IOCTL. The caller just
+ * needs to execute a command via the registered callback.
+ */
+struct isst_if_cmd_cb {
+ int registered;
+ int cmd_size;
+ int offset;
+ struct module *owner;
+ long (*cmd_callback)(u8 *ptr, int *write_only, int resume);
+};
+
+/* Internal interface functions */
+int isst_if_cdev_register(int type, struct isst_if_cmd_cb *cb);
+void isst_if_cdev_unregister(int type);
+struct pci_dev *isst_if_get_pci_dev(int cpu, int bus, int dev, int fn);
+bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *mbox_cmd);
+bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd);
+int isst_store_cmd(int cmd, int sub_command, u32 cpu, int mbox_cmd,
+ u32 param, u64 data);
+void isst_resume_common(void);
+#endif
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c
new file mode 100644
index 000000000000..89b042aecef3
--- /dev/null
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_msr.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: Mbox via MSR Interface
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/cpuhotplug.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/topology.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#include "isst_if_common.h"
+
+#define MSR_OS_MAILBOX_INTERFACE 0xB0
+#define MSR_OS_MAILBOX_DATA 0xB1
+#define MSR_OS_MAILBOX_BUSY_BIT 31
+
+/*
+ * Based on experiments, the retry count is never more than 1, as the MSR
+ * access overhead is enough for the command to finish. So this is the
+ * worst-case number.
+ */
+#define OS_MAILBOX_RETRY_COUNT 3
+
+static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter,
+ u32 command_data, u32 *response_data)
+{
+ u32 retries;
+ u64 data;
+ int ret;
+
+ /* Poll for rb bit == 0 */
+ retries = OS_MAILBOX_RETRY_COUNT;
+ do {
+ rdmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+ if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) {
+ ret = -EBUSY;
+ continue;
+ }
+ ret = 0;
+ break;
+ } while (--retries);
+
+ if (ret)
+ return ret;
+
+ /* Write DATA register */
+ wrmsrl(MSR_OS_MAILBOX_DATA, command_data);
+
+ /* Write command register */
+ data = BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT) |
+ (parameter & GENMASK_ULL(13, 0)) << 16 |
+ (sub_command << 8) |
+ command;
+ wrmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+
+ /* Poll for rb bit == 0 */
+ retries = OS_MAILBOX_RETRY_COUNT;
+ do {
+ rdmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+ if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) {
+ ret = -EBUSY;
+ continue;
+ }
+
+ if (data & 0xff)
+ return -ENXIO;
+
+ if (response_data) {
+ rdmsrl(MSR_OS_MAILBOX_DATA, data);
+ *response_data = data;
+ }
+ ret = 0;
+ break;
+ } while (--retries);
+
+ return ret;
+}
+
+struct msrl_action {
+ int err;
+ struct isst_if_mbox_cmd *mbox_cmd;
+};
+
+/* revisit, smp_call_function_single should be enough for atomic mailbox! */
+static void msrl_update_func(void *info)
+{
+ struct msrl_action *act = info;
+
+ act->err = isst_if_send_mbox_cmd(act->mbox_cmd->command,
+ act->mbox_cmd->sub_command,
+ act->mbox_cmd->parameter,
+ act->mbox_cmd->req_data,
+ &act->mbox_cmd->resp_data);
+}
+
+static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct msrl_action action;
+ int ret;
+
+ action.mbox_cmd = (struct isst_if_mbox_cmd *)cmd_ptr;
+
+ if (isst_if_mbox_cmd_invalid(action.mbox_cmd))
+ return -EINVAL;
+
+ if (isst_if_mbox_cmd_set_req(action.mbox_cmd) &&
+ !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+	 * Completing a mailbox command requires accessing two MSRs, so we
+	 * must not race while a mailbox transaction is in flight. Using
+	 * smp_call_function_single() ensures msrl_update_func() runs on the
+	 * target CPU without races and, with the wait flag set, waits for
+	 * completion. smp_call_function_single() uses get_cpu()/put_cpu().
+ */
+ ret = smp_call_function_single(action.mbox_cmd->logical_cpu,
+ msrl_update_func, &action, 1);
+ if (ret)
+ return ret;
+
+ if (!action.err && !resume && isst_if_mbox_cmd_set_req(action.mbox_cmd))
+ action.err = isst_store_cmd(action.mbox_cmd->command,
+ action.mbox_cmd->sub_command,
+ action.mbox_cmd->logical_cpu, 1,
+ action.mbox_cmd->parameter,
+ action.mbox_cmd->req_data);
+ *write_only = 0;
+
+ return action.err;
+}
+
+static int isst_pm_notify(struct notifier_block *nb,
+ unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ isst_resume_common();
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block isst_pm_nb = {
+ .notifier_call = isst_pm_notify,
+};
+
+#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+static const struct x86_cpu_id isst_if_cpu_ids[] = {
+ ICPU(INTEL_FAM6_SKYLAKE_X),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, isst_if_cpu_ids);
+
+static int __init isst_if_mbox_init(void)
+{
+ struct isst_if_cmd_cb cb;
+ const struct x86_cpu_id *id;
+ u64 data;
+ int ret;
+
+ id = x86_match_cpu(isst_if_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ /* Check presence of mailbox MSRs */
+ ret = rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data);
+ if (ret)
+ return ret;
+
+ ret = rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data);
+ if (ret)
+ return ret;
+
+ memset(&cb, 0, sizeof(cb));
+ cb.cmd_size = sizeof(struct isst_if_mbox_cmd);
+ cb.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd);
+ cb.cmd_callback = isst_if_mbox_proc_cmd;
+ cb.owner = THIS_MODULE;
+ ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
+ if (ret)
+ return ret;
+
+ ret = register_pm_notifier(&isst_pm_nb);
+ if (ret)
+ isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
+
+ return ret;
+}
+module_init(isst_if_mbox_init)
+
+static void __exit isst_if_mbox_exit(void)
+{
+ unregister_pm_notifier(&isst_pm_nb);
+ isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
+}
+module_exit(isst_if_mbox_exit)
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel speed select interface mailbox driver");
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
new file mode 100644
index 000000000000..de4169d0796b
--- /dev/null
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: Mbox via PCI Interface
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+
+#include "isst_if_common.h"
+
+#define PUNIT_MAILBOX_DATA 0xA0
+#define PUNIT_MAILBOX_INTERFACE 0xA4
+#define PUNIT_MAILBOX_BUSY_BIT 31
+
+/*
+ * Commands have a variable amount of processing time. Most of the commands
+ * will be done in 0-3 retries, but some take up to 50.
+ * The real processing time was observed as 25us for most of the commands
+ * at 2GHz. It is possible to optimize this count by taking samples on
+ * customer systems.
+ */
+#define OS_MAILBOX_RETRY_COUNT 50
+
+struct isst_if_device {
+ struct mutex mutex;
+};
+
+static int isst_if_mbox_cmd(struct pci_dev *pdev,
+ struct isst_if_mbox_cmd *mbox_cmd)
+{
+ u32 retries, data;
+ int ret;
+
+ /* Poll for rb bit == 0 */
+ retries = OS_MAILBOX_RETRY_COUNT;
+ do {
+ ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
+ &data);
+ if (ret)
+ return ret;
+
+ if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
+ ret = -EBUSY;
+ continue;
+ }
+ ret = 0;
+ break;
+ } while (--retries);
+
+ if (ret)
+ return ret;
+
+ /* Write DATA register */
+ ret = pci_write_config_dword(pdev, PUNIT_MAILBOX_DATA,
+ mbox_cmd->req_data);
+ if (ret)
+ return ret;
+
+ /* Write command register */
+ data = BIT_ULL(PUNIT_MAILBOX_BUSY_BIT) |
+ (mbox_cmd->parameter & GENMASK_ULL(13, 0)) << 16 |
+ (mbox_cmd->sub_command << 8) |
+ mbox_cmd->command;
+
+ ret = pci_write_config_dword(pdev, PUNIT_MAILBOX_INTERFACE, data);
+ if (ret)
+ return ret;
+
+ /* Poll for rb bit == 0 */
+ retries = OS_MAILBOX_RETRY_COUNT;
+ do {
+ ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
+ &data);
+ if (ret)
+ return ret;
+
+ if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
+ ret = -EBUSY;
+ continue;
+ }
+
+ if (data & 0xff)
+ return -ENXIO;
+
+ ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_DATA, &data);
+ if (ret)
+ return ret;
+
+ mbox_cmd->resp_data = data;
+ ret = 0;
+ break;
+ } while (--retries);
+
+ return ret;
+}
+
+static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct isst_if_mbox_cmd *mbox_cmd;
+ struct isst_if_device *punit_dev;
+ struct pci_dev *pdev;
+ int ret;
+
+ mbox_cmd = (struct isst_if_mbox_cmd *)cmd_ptr;
+
+ if (isst_if_mbox_cmd_invalid(mbox_cmd))
+ return -EINVAL;
+
+ if (isst_if_mbox_cmd_set_req(mbox_cmd) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ pdev = isst_if_get_pci_dev(mbox_cmd->logical_cpu, 1, 30, 1);
+ if (!pdev)
+ return -EINVAL;
+
+ punit_dev = pci_get_drvdata(pdev);
+ if (!punit_dev)
+ return -EINVAL;
+
+ /*
+	 * Allow only one complete mailbox transaction at a time on a
+	 * mapped PCI device.
+ */
+ mutex_lock(&punit_dev->mutex);
+ ret = isst_if_mbox_cmd(pdev, mbox_cmd);
+ if (!ret && !resume && isst_if_mbox_cmd_set_req(mbox_cmd))
+ ret = isst_store_cmd(mbox_cmd->command,
+ mbox_cmd->sub_command,
+ mbox_cmd->logical_cpu, 1,
+ mbox_cmd->parameter,
+ mbox_cmd->req_data);
+ mutex_unlock(&punit_dev->mutex);
+ if (ret)
+ return ret;
+
+ *write_only = 0;
+
+ return 0;
+}
+
+static const struct pci_device_id isst_if_mbox_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_0)},
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, isst_if_mbox_ids);
+
+static int isst_if_mbox_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct isst_if_device *punit_dev;
+ struct isst_if_cmd_cb cb;
+ int ret;
+
+ punit_dev = devm_kzalloc(&pdev->dev, sizeof(*punit_dev), GFP_KERNEL);
+ if (!punit_dev)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ mutex_init(&punit_dev->mutex);
+ pci_set_drvdata(pdev, punit_dev);
+
+ memset(&cb, 0, sizeof(cb));
+ cb.cmd_size = sizeof(struct isst_if_mbox_cmd);
+ cb.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd);
+ cb.cmd_callback = isst_if_mbox_proc_cmd;
+ cb.owner = THIS_MODULE;
+ ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
+
+ if (ret)
+ mutex_destroy(&punit_dev->mutex);
+
+ return ret;
+}
+
+static void isst_if_mbox_remove(struct pci_dev *pdev)
+{
+ struct isst_if_device *punit_dev;
+
+ punit_dev = pci_get_drvdata(pdev);
+ isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
+ mutex_destroy(&punit_dev->mutex);
+}
+
+static int __maybe_unused isst_if_resume(struct device *device)
+{
+ isst_resume_common();
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(isst_if_pm_ops, NULL, isst_if_resume);
+
+static struct pci_driver isst_if_pci_driver = {
+ .name = "isst_if_mbox_pci",
+ .id_table = isst_if_mbox_ids,
+ .probe = isst_if_mbox_probe,
+ .remove = isst_if_mbox_remove,
+ .driver.pm = &isst_if_pm_ops,
+};
+
+module_pci_driver(isst_if_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel speed select interface pci mailbox driver");
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
new file mode 100644
index 000000000000..f7266a115a08
--- /dev/null
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select Interface: MMIO Interface
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/isst_if.h>
+
+#include "isst_if_common.h"
+
+struct isst_mmio_range {
+ int beg;
+ int end;
+};
+
+struct isst_mmio_range mmio_range[] = {
+ {0x04, 0x14},
+ {0x20, 0xD0},
+};
+
+struct isst_if_device {
+ void __iomem *punit_mmio;
+ u32 range_0[5];
+ u32 range_1[45];
+ struct mutex mutex;
+};
+
+static long isst_if_mmio_rd_wr(u8 *cmd_ptr, int *write_only, int resume)
+{
+ struct isst_if_device *punit_dev;
+ struct isst_if_io_reg *io_reg;
+ struct pci_dev *pdev;
+
+ io_reg = (struct isst_if_io_reg *)cmd_ptr;
+ if (io_reg->reg < 0x04 || io_reg->reg > 0xD0)
+ return -EINVAL;
+
+ if (io_reg->read_write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ pdev = isst_if_get_pci_dev(io_reg->logical_cpu, 0, 0, 1);
+ if (!pdev)
+ return -EINVAL;
+
+ punit_dev = pci_get_drvdata(pdev);
+ if (!punit_dev)
+ return -EINVAL;
+
+ /*
+	 * Serialize operations on the PCI device with a per-device mutex
+	 * to avoid read/write races.
+ */
+ mutex_lock(&punit_dev->mutex);
+ if (io_reg->read_write) {
+ writel(io_reg->value, punit_dev->punit_mmio+io_reg->reg);
+ *write_only = 1;
+ } else {
+ io_reg->value = readl(punit_dev->punit_mmio+io_reg->reg);
+ *write_only = 0;
+ }
+ mutex_unlock(&punit_dev->mutex);
+
+ return 0;
+}
+
+static const struct pci_device_id isst_if_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_0)},
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, isst_if_ids);
+
+static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct isst_if_device *punit_dev;
+ struct isst_if_cmd_cb cb;
+ u32 mmio_base, pcu_base;
+ u64 base_addr;
+ int ret;
+
+ punit_dev = devm_kzalloc(&pdev->dev, sizeof(*punit_dev), GFP_KERNEL);
+ if (!punit_dev)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pci_read_config_dword(pdev, 0xD0, &mmio_base);
+ if (ret)
+ return ret;
+
+ ret = pci_read_config_dword(pdev, 0xFC, &pcu_base);
+ if (ret)
+ return ret;
+
+ pcu_base &= GENMASK(10, 0);
+ base_addr = (u64)mmio_base << 23 | (u64) pcu_base << 12;
+ punit_dev->punit_mmio = devm_ioremap(&pdev->dev, base_addr, 256);
+ if (!punit_dev->punit_mmio)
+ return -ENOMEM;
+
+ mutex_init(&punit_dev->mutex);
+ pci_set_drvdata(pdev, punit_dev);
+
+ memset(&cb, 0, sizeof(cb));
+ cb.cmd_size = sizeof(struct isst_if_io_reg);
+ cb.offset = offsetof(struct isst_if_io_regs, io_reg);
+ cb.cmd_callback = isst_if_mmio_rd_wr;
+ cb.owner = THIS_MODULE;
+ ret = isst_if_cdev_register(ISST_IF_DEV_MMIO, &cb);
+ if (ret)
+ mutex_destroy(&punit_dev->mutex);
+
+ return ret;
+}
+
+static void isst_if_remove(struct pci_dev *pdev)
+{
+ struct isst_if_device *punit_dev;
+
+ punit_dev = pci_get_drvdata(pdev);
+	isst_if_cdev_unregister(ISST_IF_DEV_MMIO);
+ mutex_destroy(&punit_dev->mutex);
+}
+
+static int __maybe_unused isst_if_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct isst_if_device *punit_dev;
+ int i;
+
+ punit_dev = pci_get_drvdata(pdev);
+ for (i = 0; i < ARRAY_SIZE(punit_dev->range_0); ++i)
+ punit_dev->range_0[i] = readl(punit_dev->punit_mmio +
+ mmio_range[0].beg + 4 * i);
+ for (i = 0; i < ARRAY_SIZE(punit_dev->range_1); ++i)
+ punit_dev->range_1[i] = readl(punit_dev->punit_mmio +
+ mmio_range[1].beg + 4 * i);
+
+ return 0;
+}
+
+static int __maybe_unused isst_if_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct isst_if_device *punit_dev;
+ int i;
+
+ punit_dev = pci_get_drvdata(pdev);
+ for (i = 0; i < ARRAY_SIZE(punit_dev->range_0); ++i)
+ writel(punit_dev->range_0[i], punit_dev->punit_mmio +
+ mmio_range[0].beg + 4 * i);
+ for (i = 0; i < ARRAY_SIZE(punit_dev->range_1); ++i)
+ writel(punit_dev->range_1[i], punit_dev->punit_mmio +
+ mmio_range[1].beg + 4 * i);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(isst_if_pm_ops, isst_if_suspend, isst_if_resume);
+
+static struct pci_driver isst_if_pci_driver = {
+ .name = "isst_if_pci",
+ .id_table = isst_if_ids,
+ .probe = isst_if_probe,
+ .remove = isst_if_remove,
+ .driver.pm = &isst_if_pm_ops,
+};
+
+module_pci_driver(isst_if_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel speed select interface mmio driver");
diff --git a/drivers/platform/x86/intel_turbo_max_3.c b/drivers/platform/x86/intel_turbo_max_3.c
index 4f60d8e32a0a..93bbcbb6e90b 100644
--- a/drivers/platform/x86/intel_turbo_max_3.c
+++ b/drivers/platform/x86/intel_turbo_max_3.c
@@ -137,9 +137,6 @@ static int __init itmt_legacy_init(void)
if (!id)
return -ENODEV;
- if (boot_cpu_has(X86_FEATURE_HWP))
- return -ENODEV;
-
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"platform/x86/turbo_max_3:online",
itmt_legacy_cpu_online, NULL);
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index d4d089c37944..63a6881c7078 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -453,6 +453,14 @@ static const struct dmi_system_id critclk_systems[] = {
},
{
/* pmc_plt_clk* - are used for ethernet controllers */
+ .ident = "Beckhoff CB4063",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+ DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
+ },
+ },
+ {
+ /* pmc_plt_clk* - are used for ethernet controllers */
.ident = "Beckhoff CB6263",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 1453ef8984b0..7d8c7a337cbe 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -716,10 +716,15 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
- count += sizeof(int))
+ count += sizeof(int)) {
+ if (act->flags & CCW_FLAG_SLI) {
+ len += sprintf(page + len, " 00000000");
+ break;
+ }
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
+ }
len += sprintf(page + len, "\n");
act++;
}
@@ -738,10 +743,15 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
- count += sizeof(int))
+ count += sizeof(int)) {
+ if (act->flags & CCW_FLAG_SLI) {
+ len += sprintf(page + len, " 00000000");
+ break;
+ }
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
+ }
len += sprintf(page + len, "\n");
act++;
}
@@ -756,10 +766,15 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
- count += sizeof(int))
+ count += sizeof(int)) {
+ if (act->flags & CCW_FLAG_SLI) {
+ len += sprintf(page + len, " 00000000");
+ break;
+ }
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
+ }
len += sprintf(page + len, "\n");
act++;
}
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index babd79361a46..0d17b5abf382 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -116,12 +116,12 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
.get_session_param = iscsi_session_get_param,
/* connection management */
.create_conn = cxgbi_create_conn,
- .bind_conn = cxgbi_bind_conn,
+ .bind_conn = __cxgbi_bind_conn,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
.get_conn_param = iscsi_conn_get_param,
- .set_param = cxgbi_set_conn_param,
+ .set_param = __cxgbi_set_conn_param,
.get_stats = cxgbi_get_conn_stats,
/* pdu xmit req from user space */
.send_pdu = iscsi_conn_send_pdu,
@@ -136,7 +136,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
.parse_pdu_itt = cxgbi_parse_pdu_itt,
/* TCP connect/disconnect */
.get_ep_param = cxgbi_get_ep_param,
- .ep_connect = cxgbi_ep_connect,
+ .ep_connect = __cxgbi_ep_connect,
.ep_poll = cxgbi_ep_poll,
.ep_disconnect = cxgbi_ep_disconnect,
/* Error recovery timeout call */
@@ -979,14 +979,17 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
if (csk->atid < 0) {
pr_err("NO atid available.\n");
- goto rel_resource;
+ return -EINVAL;
}
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
- if (!skb)
- goto rel_resource;
+ if (!skb) {
+ cxgb3_free_atid(t3dev, csk->atid);
+ cxgbi_sock_put(csk);
+ return -ENOMEM;
+ }
skb->sk = (struct sock *)csk;
set_arp_failure_handler(skb, act_open_arp_failure);
csk->snd_win = cxgb3i_snd_win;
@@ -1007,11 +1010,6 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
send_act_open_req(csk, skb, csk->l2t);
return 0;
-
-rel_resource:
- if (skb)
- __kfree_skb(skb);
- return -EINVAL;
}
cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
@@ -1144,7 +1142,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
- unsigned int tid, int pg_idx, bool reply)
+ unsigned int tid, int pg_idx)
{
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
GFP_KERNEL);
@@ -1160,7 +1158,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
req = (struct cpl_set_tcb_field *)skb->head;
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply = V_NO_REPLY(reply ? 0 : 1);
+ req->reply = V_NO_REPLY(1);
req->cpu_idx = 0;
req->word = htons(31);
req->mask = cpu_to_be64(0xF0000000);
@@ -1177,11 +1175,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
* @tid: connection id
* @hcrc: header digest enabled
* @dcrc: data digest enabled
- * @reply: request reply from h/w
 * Set up the iSCSI digest settings for the connection identified by tid.
*/
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
- int hcrc, int dcrc, int reply)
+ int hcrc, int dcrc)
{
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
GFP_KERNEL);
@@ -1197,7 +1194,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply = V_NO_REPLY(reply ? 0 : 1);
+ req->reply = V_NO_REPLY(1);
req->cpu_idx = 0;
req->word = htons(31);
req->mask = cpu_to_be64(0x0F000000);
@@ -1261,8 +1258,8 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
err = 0;
}
- cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
- cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
+ cdev->__csk_ddp_setup_digest = ddp_setup_conn_digest;
+ cdev->__csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
cdev->csk_ddp_set_map = ddp_set_map;
cdev->csk_ddp_clear_map = ddp_clear_map;
cdev->cdev2ppm = cdev2ppm;
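
The init_act_open() change above drops the shared rel_resource label in favour of per-failure unwinding: an atid allocation failure has nothing to undo, while an skb allocation failure must release exactly the atid and the socket reference taken since. A sketch of the pairing:

	csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
	if (csk->atid < 0)
		return -EINVAL;			/* nothing to undo yet */
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);			/* ref held for the open req */

	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
	if (!skb) {
		cxgb3_free_atid(t3dev, csk->atid);	/* undo both steps */
		cxgbi_sock_put(csk);
		return -ENOMEM;
	}
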
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index c4a4a15566fc..1d97ce206a6e 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -133,12 +133,12 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.get_session_param = iscsi_session_get_param,
/* connection management */
.create_conn = cxgbi_create_conn,
- .bind_conn = cxgbi_bind_conn,
+ .bind_conn = __cxgbi_bind_conn,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
.get_conn_param = iscsi_conn_get_param,
- .set_param = cxgbi_set_conn_param,
+ .set_param = __cxgbi_set_conn_param,
.get_stats = cxgbi_get_conn_stats,
/* pdu xmit req from user space */
.send_pdu = iscsi_conn_send_pdu,
@@ -153,7 +153,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.parse_pdu_itt = cxgbi_parse_pdu_itt,
/* TCP connect/disconnect */
.get_ep_param = cxgbi_get_ep_param,
- .ep_connect = cxgbi_ep_connect,
+ .ep_connect = __cxgbi_ep_connect,
.ep_poll = cxgbi_ep_poll,
.ep_disconnect = cxgbi_ep_disconnect,
/* Error recovery timeout call */
@@ -1548,16 +1548,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
struct cxgbi_sock *csk;
csk = lookup_tid(t, tid);
- if (!csk)
+ if (!csk) {
pr_err("can't find conn. for tid %u.\n", tid);
+ return;
+ }
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,%lx,%u, status 0x%x.\n",
csk, csk->state, csk->flags, csk->tid, rpl->status);
- if (rpl->status != CPL_ERR_NONE)
+ if (rpl->status != CPL_ERR_NONE) {
pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
csk, tid, rpl->status);
+ csk->err = -EINVAL;
+ }
+
+ complete(&csk->cmpl);
__kfree_skb(skb);
}
@@ -1983,7 +1989,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
- int pg_idx, bool reply)
+ int pg_idx)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
@@ -1999,7 +2005,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 8);
req->val = cpu_to_be64(pg_idx << 8);
@@ -2008,12 +2014,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
+ reinit_completion(&csk->cmpl);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
- return 0;
+ wait_for_completion(&csk->cmpl);
+
+ return csk->err;
}
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
- int hcrc, int dcrc, int reply)
+ int hcrc, int dcrc)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
@@ -2031,7 +2040,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 4);
req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
@@ -2041,8 +2050,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
+ reinit_completion(&csk->cmpl);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
- return 0;
+ wait_for_completion(&csk->cmpl);
+
+ return csk->err;
}
static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
@@ -2076,8 +2088,8 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, ppmax,
lldi->iscsi_llimit, lldi->vr->iscsi.start, 2);
- cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
- cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
+ cdev->__csk_ddp_setup_digest = ddp_setup_conn_digest;
+ cdev->__csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
cdev->csk_ddp_set_map = ddp_set_map;
cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
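
The cxgb4i hunks above turn the two SET_TCB_FIELD requests into synchronous operations: the sender re-arms csk->cmpl, asks the firmware for a reply (NO_REPLY_V(0)), blocks in wait_for_completion(), and do_set_tcb_rpl() records the status in csk->err before signalling the completion. A minimal user-space sketch of the same request/reply discipline, with a pthread condition variable standing in for struct completion and a thread standing in for the firmware reply path (all names hypothetical):

/* Sketch of the synchronous SET_TCB_FIELD flow: re-arm a completion,
 * send the request, block until the reply handler records the status
 * and completes. Only the pattern mirrors the driver change. */
#include <pthread.h>
#include <stdio.h>

struct fake_sock {
    pthread_mutex_t lock;
    pthread_cond_t cmpl;    /* stands in for struct completion */
    int done;
    int err;                /* stands in for csk->err */
};

static void *reply_handler(void *arg)   /* what do_set_tcb_rpl() does */
{
    struct fake_sock *csk = arg;

    pthread_mutex_lock(&csk->lock);
    csk->err = 0;                       /* rpl->status == CPL_ERR_NONE */
    csk->done = 1;
    pthread_cond_signal(&csk->cmpl);    /* complete(&csk->cmpl) */
    pthread_mutex_unlock(&csk->lock);
    return NULL;
}

static int setup_pgidx(struct fake_sock *csk)  /* ddp_setup_conn_pgidx() */
{
    pthread_t rpl;

    csk->done = 0;                      /* reinit_completion() */
    pthread_create(&rpl, NULL, reply_handler, csk); /* cxgb4_ofld_send() */

    pthread_mutex_lock(&csk->lock);
    while (!csk->done)                  /* wait_for_completion() */
        pthread_cond_wait(&csk->cmpl, &csk->lock);
    pthread_mutex_unlock(&csk->lock);

    pthread_join(rpl, NULL);
    return csk->err;                    /* status seen by the caller */
}

int main(void)
{
    struct fake_sock csk = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cmpl = PTHREAD_COND_INITIALIZER,
    };

    printf("setup_pgidx() -> %d\n", setup_pgidx(&csk));
    return 0;
}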
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 3f3af5e74a07..6777f6b09175 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -553,7 +553,7 @@ void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
}
EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);
-static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
+static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev, bool kabi_supports_completion)
{
struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);
@@ -573,6 +573,8 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
skb_queue_head_init(&csk->receive_queue);
skb_queue_head_init(&csk->write_queue);
timer_setup(&csk->retry_timer, NULL, 0);
+ if (kabi_supports_completion)
+ init_completion(&csk->cmpl);
rwlock_init(&csk->callback_lock);
csk->cdev = cdev;
csk->flags = 0;
@@ -599,7 +601,7 @@ static struct rtable *find_route_ipv4(struct flowi4 *fl4,
}
static struct cxgbi_sock *
-cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
+cxgbi_check_route(struct sockaddr *dst_addr, int ifindex, bool kabi_supports_completion)
{
struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
struct dst_entry *dst;
@@ -663,7 +665,7 @@ cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
port, ndev->name, cdev);
- csk = cxgbi_sock_create(cdev);
+ csk = cxgbi_sock_create(cdev, kabi_supports_completion);
if (!csk) {
err = -ENOMEM;
goto rel_neigh;
@@ -709,7 +711,7 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
}
static struct cxgbi_sock *
-cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex)
+cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex, bool kabi_supports_completion)
{
struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr;
struct dst_entry *dst;
@@ -774,7 +776,7 @@ cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex)
daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port,
ndev->name, cdev);
- csk = cxgbi_sock_create(cdev);
+ csk = cxgbi_sock_create(cdev, kabi_supports_completion);
if (!csk) {
err = -ENOMEM;
goto rel_rt;
@@ -2233,8 +2235,9 @@ static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
return 0;
}
-int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf, int buflen)
+static int cxgbi_set_conn_param_internal(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen,
+ bool does_not_need_reply_arg)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
@@ -2249,17 +2252,29 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
switch (param) {
case ISCSI_PARAM_HDRDGST_EN:
err = iscsi_set_param(cls_conn, param, buf, buflen);
- if (!err && conn->hdrdgst_en)
- err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
- conn->hdrdgst_en,
- conn->datadgst_en, 0);
+ if (!err && conn->hdrdgst_en) {
+ if (does_not_need_reply_arg)
+ err = csk->cdev->__csk_ddp_setup_digest(csk, csk->tid,
+ conn->hdrdgst_en,
+ conn->datadgst_en);
+ else
+ err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
+ conn->hdrdgst_en,
+ conn->datadgst_en, 0);
+ }
break;
case ISCSI_PARAM_DATADGST_EN:
err = iscsi_set_param(cls_conn, param, buf, buflen);
- if (!err && conn->datadgst_en)
- err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
- conn->hdrdgst_en,
- conn->datadgst_en, 0);
+ if (!err && conn->datadgst_en) {
+ if (does_not_need_reply_arg)
+ err = csk->cdev->__csk_ddp_setup_digest(csk, csk->tid,
+ conn->hdrdgst_en,
+ conn->datadgst_en);
+ else
+ err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
+ conn->hdrdgst_en,
+ conn->datadgst_en, 0);
+ }
break;
case ISCSI_PARAM_MAX_R2T:
return iscsi_tcp_set_max_r2t(conn, buf);
@@ -2278,8 +2293,21 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
}
return err;
}
+
+int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen)
+{
+ return cxgbi_set_conn_param_internal(cls_conn, param, buf, buflen, 0);
+}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
+int __cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen)
+{
+ return cxgbi_set_conn_param_internal(cls_conn, param, buf, buflen, 1);
+}
+EXPORT_SYMBOL_GPL(__cxgbi_set_conn_param);
+
static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
{
int len;
@@ -2362,9 +2390,10 @@ cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
}
EXPORT_SYMBOL_GPL(cxgbi_create_conn);
-int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
+int cxgbi_bind_conn_internal(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn,
- u64 transport_eph, int is_leading)
+ u64 transport_eph, int is_leading,
+ bool does_not_need_reply_arg)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
@@ -2384,8 +2413,12 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
csk = cep->csk;
ppm = csk->cdev->cdev2ppm(csk->cdev);
- err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
- ppm->tformat.pgsz_idx_dflt, 0);
+ if (does_not_need_reply_arg)
+ err = csk->cdev->__csk_ddp_setup_pgidx(csk, csk->tid,
+ ppm->tformat.pgsz_idx_dflt);
+ else
+ err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
+ ppm->tformat.pgsz_idx_dflt, 0);
if (err < 0)
return err;
@@ -2414,8 +2447,25 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
return 0;
}
+
+int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ u64 transport_eph, int is_leading)
+{
+ return cxgbi_bind_conn_internal(cls_session, cls_conn, transport_eph,
+ is_leading, 0);
+}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
+int __cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ u64 transport_eph, int is_leading)
+{
+ return cxgbi_bind_conn_internal(cls_session, cls_conn, transport_eph,
+ is_leading, 1);
+}
+EXPORT_SYMBOL_GPL(__cxgbi_bind_conn);
+
struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
u16 cmds_max, u16 qdepth,
u32 initial_cmdsn)
@@ -2545,9 +2595,9 @@ int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
}
EXPORT_SYMBOL_GPL(cxgbi_get_host_param);
-struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
+static struct iscsi_endpoint *cxgbi_ep_connect_internal(struct Scsi_Host *shost,
struct sockaddr *dst_addr,
- int non_blocking)
+ int non_blocking, bool kabi_supports_completion)
{
struct iscsi_endpoint *ep;
struct cxgbi_endpoint *cep;
@@ -2574,10 +2624,10 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
}
if (dst_addr->sa_family == AF_INET) {
- csk = cxgbi_check_route(dst_addr, ifindex);
+ csk = cxgbi_check_route(dst_addr, ifindex, kabi_supports_completion);
#if IS_ENABLED(CONFIG_IPV6)
} else if (dst_addr->sa_family == AF_INET6) {
- csk = cxgbi_check_route6(dst_addr, ifindex);
+ csk = cxgbi_check_route6(dst_addr, ifindex, kabi_supports_completion);
#endif
} else {
pr_info("address family 0x%x NOT supported.\n",
@@ -2638,8 +2688,32 @@ release_conn:
err_out:
return ERR_PTR(err);
}
+
+
+struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
+ int non_blocking)
+{
+ /*
+ * This uses the original kABI, which does not use a completion, and
+ * is kept for external drivers expecting the original kABI.
+ */
+ return cxgbi_ep_connect_internal(shost, dst_addr, non_blocking, false);
+}
EXPORT_SYMBOL_GPL(cxgbi_ep_connect);
+struct iscsi_endpoint *__cxgbi_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
+ int non_blocking)
+{
+ /*
+ * This uses the new kABI, which uses a completion, and is
+ * used by our updated cxgb3i and cxgb4i drivers.
+ */
+ return cxgbi_ep_connect_internal(shost, dst_addr, non_blocking, true);
+}
+EXPORT_SYMBOL_GPL(__cxgbi_ep_connect);
+
int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
struct cxgbi_endpoint *cep = ep->dd_data;
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 5d5d8b50d842..7cee794e271e 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -161,6 +161,9 @@ struct cxgbi_sock {
u32 write_seq;
u32 snd_win;
u32 rcv_win;
+#ifndef __GENKSYMS__
+ struct completion cmpl;
+#endif
};
/*
@@ -504,6 +507,12 @@ struct cxgbi_device {
int (*csk_init_act_open)(struct cxgbi_sock *);
void *dd_data;
+#ifndef __GENKSYMS__
+ int (*__csk_ddp_setup_digest)(struct cxgbi_sock *,
+ unsigned int, int, int);
+ int (*__csk_ddp_setup_pgidx)(struct cxgbi_sock *,
+ unsigned int, int);
+#endif
};
#define cxgbi_cdev_priv(cdev) ((cdev)->dd_data)
@@ -589,10 +598,14 @@ umode_t cxgbi_attr_is_visible(int param_type, int param);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
int cxgbi_set_conn_param(struct iscsi_cls_conn *,
enum iscsi_param, char *, int);
+int __cxgbi_set_conn_param(struct iscsi_cls_conn *,
+ enum iscsi_param, char *, int);
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *);
struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32);
int cxgbi_bind_conn(struct iscsi_cls_session *,
struct iscsi_cls_conn *, u64, int);
+int __cxgbi_bind_conn(struct iscsi_cls_session *,
+ struct iscsi_cls_conn *, u64, int);
void cxgbi_destroy_session(struct iscsi_cls_session *);
struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *,
u16, u16, u32);
@@ -601,6 +614,8 @@ int cxgbi_set_host_param(struct Scsi_Host *,
int cxgbi_get_host_param(struct Scsi_Host *, enum iscsi_host_param, char *);
struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *,
struct sockaddr *, int);
+struct iscsi_endpoint *__cxgbi_ep_connect(struct Scsi_Host *,
+ struct sockaddr *, int);
int cxgbi_ep_poll(struct iscsi_endpoint *, int);
void cxgbi_ep_disconnect(struct iscsi_endpoint *);
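
The #ifndef __GENKSYMS__ guards above are the usual kABI-preservation trick: genksyms computes symbol-version CRCs from the struct layout it parses, so hiding the appended members from it keeps the CRCs, and hence the kABI, stable for out-of-tree modules, while the real compile (where __GENKSYMS__ is undefined) sees the new fields. A sketch of the shape with illustrative names; appending members this way is only safe when external code never embeds the struct or depends on its size:

/* genksyms parses this with __GENKSYMS__ defined and therefore never
 * sees new_op, so the symbol CRCs of everything referencing the
 * struct stay unchanged; the normal build gets the new member. */
struct example_device_ops {
    int (*old_op)(void *ctx, int arg);  /* part of the frozen kABI */
#ifndef __GENKSYMS__
    int (*new_op)(void *ctx);           /* invisible to genksyms */
#endif
};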
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 48c7890c3007..2b0b757dc626 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -2339,7 +2339,8 @@ static irqreturn_t pci230_interrupt(int irq, void *d)
devpriv->intr_running = false;
spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
- comedi_handle_events(dev, s_ao);
+ if (s_ao)
+ comedi_handle_events(dev, s_ao);
comedi_handle_events(dev, s_ai);
return IRQ_HANDLED;
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index d5295bbdd28c..37133d54dda1 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -566,7 +566,8 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
}
#endif
comedi_handle_events(dev, s);
- comedi_handle_events(dev, s_ao);
+ if (s_ao)
+ comedi_handle_events(dev, s_ao);
return IRQ_RETVAL(handled);
}
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index e30a5be5f318..c383d026797d 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -137,10 +137,91 @@ static inline void handle_group_key(struct ieee_param *param,
}
}
-static noinline_for_stack char *translate_scan(struct _adapter *padapter,
- struct iw_request_info *info,
- struct wlan_network *pnetwork,
- char *start, char *stop)
+static noinline_for_stack char *translate_scan_wpa(struct iw_request_info *info,
+ struct wlan_network *pnetwork,
+ struct iw_event *iwe,
+ char *start, char *stop)
+{
+ /* parsing WPA/WPA2 IE */
+ u8 buf[MAX_WPA_IE_LEN];
+ u8 wpa_ie[255], rsn_ie[255];
+ u16 wpa_len = 0, rsn_len = 0;
+ int n, i;
+
+ r8712_get_sec_ie(pnetwork->network.IEs,
+ pnetwork->network.IELength, rsn_ie, &rsn_len,
+ wpa_ie, &wpa_len);
+ if (wpa_len > 0) {
+ memset(buf, 0, MAX_WPA_IE_LEN);
+ n = sprintf(buf, "wpa_ie=");
+ for (i = 0; i < wpa_len; i++) {
+ n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
+ "%02x", wpa_ie[i]);
+ if (n >= MAX_WPA_IE_LEN)
+ break;
+ }
+ memset(iwe, 0, sizeof(*iwe));
+ iwe->cmd = IWEVCUSTOM;
+ iwe->u.data.length = (u16)strlen(buf);
+ start = iwe_stream_add_point(info, start, stop,
+ iwe, buf);
+ memset(iwe, 0, sizeof(*iwe));
+ iwe->cmd = IWEVGENIE;
+ iwe->u.data.length = (u16)wpa_len;
+ start = iwe_stream_add_point(info, start, stop,
+ iwe, wpa_ie);
+ }
+ if (rsn_len > 0) {
+ memset(buf, 0, MAX_WPA_IE_LEN);
+ n = sprintf(buf, "rsn_ie=");
+ for (i = 0; i < rsn_len; i++) {
+ n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
+ "%02x", rsn_ie[i]);
+ if (n >= MAX_WPA_IE_LEN)
+ break;
+ }
+ memset(iwe, 0, sizeof(*iwe));
+ iwe->cmd = IWEVCUSTOM;
+ iwe->u.data.length = strlen(buf);
+ start = iwe_stream_add_point(info, start, stop,
+ iwe, buf);
+ memset(iwe, 0, sizeof(*iwe));
+ iwe->cmd = IWEVGENIE;
+ iwe->u.data.length = rsn_len;
+ start = iwe_stream_add_point(info, start, stop, iwe,
+ rsn_ie);
+ }
+
+ return start;
+}
+
+static noinline_for_stack char *translate_scan_wps(struct iw_request_info *info,
+ struct wlan_network *pnetwork,
+ struct iw_event *iwe,
+ char *start, char *stop)
+{
+ /* parsing WPS IE */
+ u8 wps_ie[512];
+ uint wps_ielen;
+
+ if (r8712_get_wps_ie(pnetwork->network.IEs,
+ pnetwork->network.IELength,
+ wps_ie, &wps_ielen)) {
+ if (wps_ielen > 2) {
+ iwe->cmd = IWEVGENIE;
+ iwe->u.data.length = (u16)wps_ielen;
+ start = iwe_stream_add_point(info, start, stop,
+ iwe, wps_ie);
+ }
+ }
+
+ return start;
+}
+
+static char *translate_scan(struct _adapter *padapter,
+ struct iw_request_info *info,
+ struct wlan_network *pnetwork,
+ char *start, char *stop)
{
struct iw_event iwe;
struct ieee80211_ht_cap *pht_capie;
@@ -253,73 +334,11 @@ static noinline_for_stack char *translate_scan(struct _adapter *padapter,
/* Check if we added any event */
if ((current_val - start) > iwe_stream_lcp_len(info))
start = current_val;
- /* parsing WPA/WPA2 IE */
- {
- u8 buf[MAX_WPA_IE_LEN];
- u8 wpa_ie[255], rsn_ie[255];
- u16 wpa_len = 0, rsn_len = 0;
- int n;
-
- r8712_get_sec_ie(pnetwork->network.IEs,
- pnetwork->network.IELength, rsn_ie, &rsn_len,
- wpa_ie, &wpa_len);
- if (wpa_len > 0) {
- memset(buf, 0, MAX_WPA_IE_LEN);
- n = sprintf(buf, "wpa_ie=");
- for (i = 0; i < wpa_len; i++) {
- n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
- "%02x", wpa_ie[i]);
- if (n >= MAX_WPA_IE_LEN)
- break;
- }
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- iwe.u.data.length = (u16)strlen(buf);
- start = iwe_stream_add_point(info, start, stop,
- &iwe, buf);
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = (u16)wpa_len;
- start = iwe_stream_add_point(info, start, stop,
- &iwe, wpa_ie);
- }
- if (rsn_len > 0) {
- memset(buf, 0, MAX_WPA_IE_LEN);
- n = sprintf(buf, "rsn_ie=");
- for (i = 0; i < rsn_len; i++) {
- n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
- "%02x", rsn_ie[i]);
- if (n >= MAX_WPA_IE_LEN)
- break;
- }
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- iwe.u.data.length = strlen(buf);
- start = iwe_stream_add_point(info, start, stop,
- &iwe, buf);
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = rsn_len;
- start = iwe_stream_add_point(info, start, stop, &iwe,
- rsn_ie);
- }
- }
- { /* parsing WPS IE */
- u8 wps_ie[512];
- uint wps_ielen;
+ start = translate_scan_wpa(info, pnetwork, &iwe, start, stop);
+
+ start = translate_scan_wps(info, pnetwork, &iwe, start, stop);
- if (r8712_get_wps_ie(pnetwork->network.IEs,
- pnetwork->network.IELength,
- wps_ie, &wps_ielen)) {
- if (wps_ielen > 2) {
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = (u16)wps_ielen;
- start = iwe_stream_add_point(info, start, stop,
- &iwe, wps_ie);
- }
- }
- }
/* Add quality statistics */
iwe.cmd = IWEVQUAL;
rssi = r8712_signal_scale_mapping(pnetwork->network.Rssi);
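
Both helpers split out of translate_scan() share a bounded hex-dump idiom: sprintf() writes the prefix, each byte is appended with snprintf(buf + n, MAX_WPA_IE_LEN - n, "%02x", ...), and the loop bails once n reaches the buffer size, relying on snprintf() returning the would-be length rather than the truncated one. A standalone sketch of the idiom (BUF_LEN and the sample IE bytes are hypothetical):

/* Standalone sketch of the bounded hex-encode loop shared by
 * translate_scan_wpa(); BUF_LEN and the sample IE are hypothetical. */
#include <stdio.h>
#include <string.h>

#define BUF_LEN 64

static int hex_encode(char *buf, const unsigned char *ie, int ie_len)
{
    int n, i;

    memset(buf, 0, BUF_LEN);
    n = sprintf(buf, "wpa_ie=");
    for (i = 0; i < ie_len; i++) {
        /* snprintf() returns the would-be length, so n can pass
         * BUF_LEN; the check stops the loop before the next call. */
        n += snprintf(buf + n, BUF_LEN - n, "%02x", ie[i]);
        if (n >= BUF_LEN)
            break;
    }
    return n;
}

int main(void)
{
    unsigned char ie[] = { 0xdd, 0x16, 0x00, 0x50, 0xf2, 0x01 };
    char buf[BUF_LEN];

    hex_encode(buf, ie, (int)sizeof(ie));
    puts(buf);
    return 0;
}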
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index f2552052b5d3..6316ffa3ab18 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -279,7 +279,7 @@ MODULE_PARM_DESC(pc104_3, "set interface types for ISA(PC104) board #3 (e.g. pc1
module_param_array(pc104_4, ulong, NULL, 0);
MODULE_PARM_DESC(pc104_4, "set interface types for ISA(PC104) board #4 (e.g. pc104_4=232,232,485,485,...");
-static int rp_init(void);
+static int __init rp_init(void);
static void rp_cleanup_module(void);
module_init(rp_init);
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index e025ca63d8e3..b4460d4da3aa 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1831,8 +1831,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
status = serial_port_in(port, UART_LSR);
- if (status & (UART_LSR_DR | UART_LSR_BI) &&
- iir & UART_IIR_RDI) {
+ if (status & (UART_LSR_DR | UART_LSR_BI)) {
if (!up->dma || handle_rx_dma(up, iir))
status = serial8250_rx_chars(up, status);
}
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index a182331cf956..b93d7cfa7749 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -421,7 +421,16 @@ static int cpm_uart_startup(struct uart_port *port)
clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX);
}
cpm_uart_initbd(pinfo);
- cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+ if (IS_SMC(pinfo)) {
+ out_be32(&pinfo->smcup->smc_rstate, 0);
+ out_be32(&pinfo->smcup->smc_tstate, 0);
+ out_be16(&pinfo->smcup->smc_rbptr,
+ in_be16(&pinfo->smcup->smc_rbase));
+ out_be16(&pinfo->smcup->smc_tbptr,
+ in_be16(&pinfo->smcup->smc_tbase));
+ } else {
+ cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+ }
}
/* Install interrupt handler. */
retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port);
@@ -875,16 +884,14 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
(u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE);
/*
- * In case SMC1 is being relocated...
+ * In case SMC is being relocated...
*/
-#if defined (CONFIG_I2C_SPI_SMC1_UCODE_PATCH)
out_be16(&up->smc_rbptr, in_be16(&pinfo->smcup->smc_rbase));
out_be16(&up->smc_tbptr, in_be16(&pinfo->smcup->smc_tbase));
out_be32(&up->smc_rstate, 0);
out_be32(&up->smc_tstate, 0);
out_be16(&up->smc_brkcr, 1); /* number of break chars */
out_be16(&up->smc_brkec, 0);
-#endif
/* Set up the uart parameters in the
* parameter ram.
@@ -898,8 +905,6 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
out_be16(&up->smc_brkec, 0);
out_be16(&up->smc_brkcr, 1);
- cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
-
/* Set UART mode, 8 bit, no parity, one stop.
* Enable receive and transmit.
*/
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index d7692eb3d185..6bb0cbacff90 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1737,6 +1737,7 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport;
+ int ret;
uport = uart_port_check(state);
if (!uport || uport->flags & UPF_DEAD)
@@ -1747,7 +1748,11 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
/*
* Start up the serial port.
*/
- return uart_startup(tty, state, 0);
+ ret = uart_startup(tty, state, 0);
+ if (ret > 0)
+ tty_port_set_active(port, 1);
+
+ return ret;
}
static const char *uart_type(struct uart_port *port)
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 8fcc0e412c84..c5ca13d7068b 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -199,7 +199,7 @@ struct cdns_platform_data {
u32 quirks;
};
#define to_cdns_uart(_nb) container_of(_nb, struct cdns_uart, \
- clk_rate_change_nb);
+ clk_rate_change_nb)
/**
* cdns_uart_handle_rx - Handle the received bytes along with Rx errors.
@@ -312,15 +312,16 @@ static void cdns_uart_handle_tx(void *dev_id)
} else {
numbytes = port->fifosize;
while (numbytes && !uart_circ_empty(&port->state->xmit) &&
- !(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)) {
+ !(readl(port->membase + CDNS_UART_SR) &
+ CDNS_UART_SR_TXFULL)) {
/*
* Get the data from the UART circular buffer
* and write it to the cdns_uart's TX_FIFO
* register.
*/
writel(
- port->state->xmit.buf[port->state->xmit.
- tail], port->membase + CDNS_UART_FIFO);
+ port->state->xmit.buf[port->state->xmit.tail],
+ port->membase + CDNS_UART_FIFO);
port->icount.tx++;
@@ -1065,8 +1066,6 @@ static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c)
cpu_relax();
spin_unlock_irqrestore(&port->lock, flags);
-
- return;
}
#endif
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index bbfe79e2b9a9..563e15e13c70 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -190,11 +190,12 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
out = dev->port_usb->out_ep;
else
out = NULL;
- spin_unlock_irqrestore(&dev->lock, flags);
if (!out)
+ {
+ spin_unlock_irqrestore(&dev->lock, flags);
return -ENOTCONN;
-
+ }
/* Padding up to RX_EXTRA handles minor disagreements with host.
* Normally we use the USB "terminate on short read" convention;
@@ -218,6 +219,7 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
if (dev->port_usb->is_fixed)
size = max_t(size_t, size, dev->port_usb->fixed_out_len);
+ spin_unlock_irqrestore(&dev->lock, flags);
skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
if (skb == NULL) {
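
The rx_submit() change narrows a race: dev->port_usb can be cleared by a disconnect, so every dereference of it, including the is_fixed and fixed_out_len reads further down, has to stay under dev->lock, which is now dropped only just before the sleeping allocation. A user-space sketch of the pattern with a pthread mutex in place of the spinlock (types and names illustrative):

/* Sketch of the locking pattern behind the u_ether fix: check and
 * dereference a clearable pointer under its lock, and drop the lock
 * only before a potentially sleeping allocation. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct port { int is_fixed; size_t fixed_out_len; };
struct eth_dev { pthread_mutex_t lock; struct port *port_usb; };

static int rx_submit(struct eth_dev *dev, size_t size)
{
    void *skb;

    pthread_mutex_lock(&dev->lock);
    if (!dev->port_usb) {               /* check under the lock... */
        pthread_mutex_unlock(&dev->lock);
        return -1;                      /* -ENOTCONN */
    }
    if (dev->port_usb->is_fixed)        /* ...dereference under it too */
        size = dev->port_usb->fixed_out_len;
    pthread_mutex_unlock(&dev->lock);   /* drop only before the alloc */

    skb = malloc(size);
    if (!skb)
        return -1;
    free(skb);
    return 0;
}

int main(void)
{
    struct port p = { .is_fixed = 1, .fixed_out_len = 512 };
    struct eth_dev dev = { PTHREAD_MUTEX_INITIALIZER, &p };

    printf("rx_submit -> %d\n", rx_submit(&dev, 64));
    return 0;
}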
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index e0c1b0099265..089f39de6897 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1345,12 +1345,15 @@ static const struct usb_gadget_ops fusb300_gadget_ops = {
static int fusb300_remove(struct platform_device *pdev)
{
struct fusb300 *fusb300 = platform_get_drvdata(pdev);
+ int i;
usb_del_gadget_udc(&fusb300->gadget);
iounmap(fusb300->reg);
free_irq(platform_get_irq(pdev, 0), fusb300);
fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
+ for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
+ kfree(fusb300->ep[i]);
kfree(fusb300);
return 0;
@@ -1494,6 +1497,8 @@ clean_up:
if (fusb300->ep0_req)
fusb300_free_request(&fusb300->ep[0]->ep,
fusb300->ep0_req);
+ for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
+ kfree(fusb300->ep[i]);
kfree(fusb300);
}
if (reg)
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 8f32b5ee7734..6df1aded4503 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -935,8 +935,7 @@ static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc)
dma_addr_t dma;
struct lpc32xx_usbd_dd_gad *dd;
- dd = (struct lpc32xx_usbd_dd_gad *) dma_pool_alloc(
- udc->dd_cache, (GFP_KERNEL | GFP_DMA), &dma);
+ dd = dma_pool_alloc(udc->dd_cache, GFP_ATOMIC | GFP_DMA, &dma);
if (dd)
dd->this_dma = dma;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 4ec56fe6446a..ecd71a51b3ac 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -69,42 +69,6 @@ noinline void btrfs_set_path_blocking(struct btrfs_path *p)
}
}
-/*
- * reset all the locked nodes in the patch to spinning locks.
- *
- * held is used to keep lockdep happy, when lockdep is enabled
- * we set held to a blocking lock before we go around and
- * retake all the spinlocks in the path. You can safely use NULL
- * for held
- */
-noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
- struct extent_buffer *held, int held_rw)
-{
- int i;
-
- if (held) {
- btrfs_set_lock_blocking_rw(held, held_rw);
- if (held_rw == BTRFS_WRITE_LOCK)
- held_rw = BTRFS_WRITE_LOCK_BLOCKING;
- else if (held_rw == BTRFS_READ_LOCK)
- held_rw = BTRFS_READ_LOCK_BLOCKING;
- }
- btrfs_set_path_blocking(p);
-
- for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
- if (p->nodes[i] && p->locks[i]) {
- btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
- if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
- p->locks[i] = BTRFS_WRITE_LOCK;
- else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
- p->locks[i] = BTRFS_READ_LOCK;
- }
- }
-
- if (held)
- btrfs_clear_lock_blocking_rw(held, held_rw);
-}
-
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
@@ -1430,7 +1394,6 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
}
}
- btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
@@ -2612,7 +2575,6 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
btrfs_set_path_blocking(p);
reada_for_balance(fs_info, p, level);
sret = split_node(trans, root, p, level);
- btrfs_clear_path_blocking(p, NULL, 0);
BUG_ON(sret > 0);
if (sret) {
@@ -2633,7 +2595,6 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
btrfs_set_path_blocking(p);
reada_for_balance(fs_info, p, level);
sret = balance_level(trans, root, p, level);
- btrfs_clear_path_blocking(p, NULL, 0);
if (sret) {
ret = sret;
@@ -2929,7 +2890,10 @@ again:
}
cow_done:
p->nodes[level] = b;
- btrfs_clear_path_blocking(p, NULL, 0);
+ /*
+ * Leave the path with blocking locks to avoid a massive number of
+ * lock context switches; this is done on purpose.
+ */
/*
* we have a lock on b and as long as we aren't changing
@@ -3011,8 +2975,6 @@ cow_done:
if (!err) {
btrfs_set_path_blocking(p);
btrfs_tree_lock(b);
- btrfs_clear_path_blocking(p, b,
- BTRFS_WRITE_LOCK);
}
p->locks[level] = BTRFS_WRITE_LOCK;
} else {
@@ -3020,8 +2982,6 @@ cow_done:
if (!err) {
btrfs_set_path_blocking(p);
btrfs_tree_read_lock(b);
- btrfs_clear_path_blocking(p, b,
- BTRFS_READ_LOCK);
}
p->locks[level] = BTRFS_READ_LOCK;
}
@@ -3040,7 +3000,6 @@ cow_done:
btrfs_set_path_blocking(p);
err = split_leaf(trans, root, key,
p, ins_len, ret == 0);
- btrfs_clear_path_blocking(p, NULL, 0);
BUG_ON(err > 0);
if (err) {
@@ -3107,7 +3066,6 @@ again:
while (b) {
level = btrfs_header_level(b);
p->nodes[level] = b;
- btrfs_clear_path_blocking(p, NULL, 0);
/*
* we have a lock on b and as long as we aren't changing
@@ -3153,8 +3111,6 @@ again:
if (!err) {
btrfs_set_path_blocking(p);
btrfs_tree_read_lock(b);
- btrfs_clear_path_blocking(p, b,
- BTRFS_READ_LOCK);
}
b = tree_mod_log_rewind(fs_info, p, b, time_seq);
if (!b) {
@@ -5359,7 +5315,6 @@ find_next_key:
path->locks[level - 1] = BTRFS_READ_LOCK;
path->nodes[level - 1] = cur;
unlock_up(path, level, 1, 0, NULL);
- btrfs_clear_path_blocking(path, NULL, 0);
}
out:
path->keep_locks = keep_locks;
@@ -5949,8 +5904,6 @@ again:
if (!ret) {
btrfs_set_path_blocking(path);
btrfs_tree_read_lock(next);
- btrfs_clear_path_blocking(path, next,
- BTRFS_READ_LOCK);
}
next_rw_lock = BTRFS_READ_LOCK;
}
@@ -5986,8 +5939,6 @@ again:
if (!ret) {
btrfs_set_path_blocking(path);
btrfs_tree_read_lock(next);
- btrfs_clear_path_blocking(path, next,
- BTRFS_READ_LOCK);
}
next_rw_lock = BTRFS_READ_LOCK;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8513c4bada87..2be20d32cc3b 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2971,8 +2971,6 @@ void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
void btrfs_set_path_blocking(struct btrfs_path *p);
-void btrfs_clear_path_blocking(struct btrfs_path *p,
- struct extent_buffer *held, int held_rw);
void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 9b1df3912260..baccc5215fa4 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -755,9 +755,6 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
i++;
}
- /* reset all the locked nodes in the patch to spinning locks. */
- btrfs_clear_path_blocking(path, NULL, 0);
-
/* insert the keys of the items */
setup_items_for_insert(root, path, keys, data_size,
total_data_size, total_size, nitems);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 5193218f5889..8b6b416e9ae1 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -434,6 +434,7 @@ static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
res->l_lock_refresh = 0;
+ res->l_lock_wait = 0;
memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
}
@@ -468,6 +469,8 @@ static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
if (ret)
stats->ls_fail++;
+
+ stats->ls_last = ktime_to_us(ktime_get_real());
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
@@ -475,6 +478,21 @@ static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
lockres->l_lock_refresh++;
}
+static inline void ocfs2_track_lock_wait(struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_mask_waiter *mw;
+
+ if (list_empty(&lockres->l_mask_waiters)) {
+ lockres->l_lock_wait = 0;
+ return;
+ }
+
+ mw = list_first_entry(&lockres->l_mask_waiters,
+ struct ocfs2_mask_waiter, mw_item);
+ lockres->l_lock_wait =
+ ktime_to_us(ktime_mono_to_real(mw->mw_lock_start));
+}
+
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
mw->mw_lock_start = ktime_get();
@@ -490,6 +508,9 @@ static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
}
+static inline void ocfs2_track_lock_wait(struct ocfs2_lock_res *lockres)
+{
+}
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
}
@@ -861,6 +882,7 @@ static void lockres_set_flags(struct ocfs2_lock_res *lockres,
list_del_init(&mw->mw_item);
mw->mw_status = 0;
complete(&mw->mw_complete);
+ ocfs2_track_lock_wait(lockres);
}
}
static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
@@ -1372,6 +1394,7 @@ static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
mw->mw_mask = mask;
mw->mw_goal = goal;
+ ocfs2_track_lock_wait(lockres);
}
/* returns 0 if the mw that was removed was already satisfied, -EBUSY
@@ -1388,6 +1411,7 @@ static int __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
list_del_init(&mw->mw_item);
init_completion(&mw->mw_complete);
+ ocfs2_track_lock_wait(lockres);
}
return ret;
@@ -2825,6 +2849,8 @@ struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
kref_init(&dlm_debug->d_refcnt);
INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
dlm_debug->d_locking_state = NULL;
+ dlm_debug->d_locking_filter = NULL;
+ dlm_debug->d_filter_secs = 0;
out:
return dlm_debug;
}
@@ -2915,17 +2941,43 @@ static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
* - Lock stats printed
* New in version 3
* - Max time in lock stats is in usecs (instead of nsecs)
+ * New in version 4
+ * - Add last pr/ex unlock times and first lock wait time in usecs
*/
-#define OCFS2_DLM_DEBUG_STR_VERSION 3
+#define OCFS2_DLM_DEBUG_STR_VERSION 4
static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
{
int i;
char *lvb;
struct ocfs2_lock_res *lockres = v;
+#ifdef CONFIG_OCFS2_FS_STATS
+ u64 now, last;
+ struct ocfs2_dlm_debug *dlm_debug =
+ ((struct ocfs2_dlm_seq_priv *)m->private)->p_dlm_debug;
+#endif
if (!lockres)
return -EINVAL;
+#ifdef CONFIG_OCFS2_FS_STATS
+ if (!lockres->l_lock_wait && dlm_debug->d_filter_secs) {
+ now = ktime_to_us(ktime_get_real());
+ if (lockres->l_lock_prmode.ls_last >
+ lockres->l_lock_exmode.ls_last)
+ last = lockres->l_lock_prmode.ls_last;
+ else
+ last = lockres->l_lock_exmode.ls_last;
+ /*
+ * Use the d_filter_secs field to filter the lock resources dump;
+ * the default d_filter_secs value (0) filters nothing. Otherwise,
+ * only lock resources active within the last N seconds are
+ * dumped.
+ */
+ if (div_u64(now - last, 1000000) > dlm_debug->d_filter_secs)
+ return 0;
+ }
+#endif
+
seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
@@ -2967,6 +3019,9 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
# define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max)
# define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max)
# define lock_refresh(_l) ((_l)->l_lock_refresh)
+# define lock_last_prmode(_l) ((_l)->l_lock_prmode.ls_last)
+# define lock_last_exmode(_l) ((_l)->l_lock_exmode.ls_last)
+# define lock_wait(_l) ((_l)->l_lock_wait)
#else
# define lock_num_prmode(_l) (0)
# define lock_num_exmode(_l) (0)
@@ -2977,6 +3032,9 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
# define lock_max_prmode(_l) (0)
# define lock_max_exmode(_l) (0)
# define lock_refresh(_l) (0)
+# define lock_last_prmode(_l) (0ULL)
+# define lock_last_exmode(_l) (0ULL)
+# define lock_wait(_l) (0ULL)
#endif
/* The following seq_print was added in version 2 of this output */
seq_printf(m, "%u\t"
@@ -2987,7 +3045,10 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
"%llu\t"
"%u\t"
"%u\t"
- "%u\t",
+ "%u\t"
+ "%llu\t"
+ "%llu\t"
+ "%llu\t",
lock_num_prmode(lockres),
lock_num_exmode(lockres),
lock_num_prmode_failed(lockres),
@@ -2996,7 +3057,10 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
lock_total_exmode(lockres),
lock_max_prmode(lockres),
lock_max_exmode(lockres),
- lock_refresh(lockres));
+ lock_refresh(lockres),
+ lock_last_prmode(lockres),
+ lock_last_exmode(lockres),
+ lock_wait(lockres));
/* End the line */
seq_printf(m, "\n");
@@ -3067,6 +3131,17 @@ static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
goto out;
}
+ dlm_debug->d_locking_filter = debugfs_create_u32("locking_filter",
+ 0600,
+ osb->osb_debug_root,
+ &dlm_debug->d_filter_secs);
+ if (!dlm_debug->d_locking_filter) {
+ ret = -EINVAL;
+ mlog(ML_ERROR,
+ "Unable to create locking filter debugfs file.\n");
+ goto out;
+ }
+
ocfs2_get_dlm_debug(dlm_debug);
out:
return ret;
@@ -3078,6 +3153,7 @@ static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
if (dlm_debug) {
debugfs_remove(dlm_debug->d_locking_state);
+ debugfs_remove(dlm_debug->d_locking_filter);
ocfs2_put_dlm_debug(dlm_debug);
}
}
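
The new locking_filter works on microsecond timestamps: a lock resource is dumped if a waiter is pending, if d_filter_secs is 0 (the default), or if its most recent PR/EX unlock is no older than d_filter_secs seconds. A sketch of the predicate with a worked pair of inputs (names illustrative):

/* Sketch of the locking_filter predicate added above; values are in
 * microseconds as in the driver. */
#include <stdint.h>
#include <stdio.h>

static int should_dump(uint64_t now_us, uint64_t last_pr_us,
                       uint64_t last_ex_us, uint64_t lock_wait_us,
                       uint32_t filter_secs)
{
    uint64_t last = last_pr_us > last_ex_us ? last_pr_us : last_ex_us;

    if (lock_wait_us || !filter_secs)   /* waiter pending or filter off */
        return 1;
    /* only resources active within the last filter_secs seconds */
    return (now_us - last) / 1000000 <= filter_secs;
}

int main(void)
{
    /* last unlocked 3s ago with a 5s filter -> dumped (prints 1) */
    printf("%d\n", should_dump(10000000, 7000000, 6000000, 0, 5));
    /* last unlocked 33s ago with a 5s filter -> skipped (prints 0) */
    printf("%d\n", should_dump(40000000, 7000000, 6000000, 0, 5));
    return 0;
}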
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 0c39d71c67a1..b7fd0681c5ed 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -162,6 +162,7 @@ struct ocfs2_lock_stats {
/* Storing max wait in usecs saves 24 bytes per inode */
u32 ls_max; /* Max wait in USEC */
+ u64 ls_last; /* Last unlock time in USEC */
};
#endif
@@ -203,6 +204,7 @@ struct ocfs2_lock_res {
#ifdef CONFIG_OCFS2_FS_STATS
struct ocfs2_lock_stats l_lock_prmode; /* PR mode stats */
u32 l_lock_refresh; /* Disk refreshes */
+ u64 l_lock_wait; /* First lock wait time */
struct ocfs2_lock_stats l_lock_exmode; /* EX mode stats */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -234,6 +236,8 @@ struct ocfs2_orphan_scan {
struct ocfs2_dlm_debug {
struct kref d_refcnt;
struct dentry *d_locking_state;
+ struct dentry *d_locking_filter;
+ u32 d_filter_secs;
struct list_head d_lockres_tracking;
};
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index eb0b1a070dc5..d3516fffac67 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -472,7 +472,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
*
* Find the css for the (@task, @subsys_id) combination, increment a
* reference on and return it. This function is guaranteed to return a
- * valid css.
+ * valid css. The returned css may already have been offlined.
*/
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
@@ -482,7 +482,13 @@ task_get_css(struct task_struct *task, int subsys_id)
rcu_read_lock();
while (true) {
css = task_css(task, subsys_id);
- if (likely(css_tryget_online(css)))
+ /*
+ * Can't use css_tryget_online() here. A task which has
+ * PF_EXITING set may stay associated with an offline css.
+ * If such a task calls this function, css_tryget_online()
+ * will keep failing.
+ */
+ if (likely(css_tryget(css)))
break;
cpu_relax();
}
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 410668292696..d3b89b324b39 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -245,20 +245,12 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
static struct freq_attr _name = \
__ATTR(_name, 0200, NULL, store_##_name)
-struct global_attr {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj,
- struct attribute *attr, char *buf);
- ssize_t (*store)(struct kobject *a, struct attribute *b,
- const char *c, size_t count);
-};
-
#define define_one_global_ro(_name) \
-static struct global_attr _name = \
+static struct kobj_attribute _name = \
__ATTR(_name, 0444, show_##_name, NULL)
#define define_one_global_rw(_name) \
-static struct global_attr _name = \
+static struct kobj_attribute _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
diff --git a/include/linux/device.h b/include/linux/device.h
index f5fc47f34055..863eff797c11 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1033,7 +1033,7 @@ static inline void set_dev_node(struct device *dev, int node)
#else
static inline int dev_to_node(struct device *dev)
{
- return -1;
+ return NUMA_NO_NODE;
}
static inline void set_dev_node(struct device *dev, int node)
{
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2008de121705..22b3b47e7a66 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -189,7 +189,7 @@ extern struct cred init_cred;
#ifdef CONFIG_NUMA_BALANCING
# define INIT_NUMA_BALANCING(tsk) \
- .numa_preferred_nid = -1, \
+ .numa_preferred_nid = NUMA_NO_NODE, \
.numa_group = NULL, \
.numa_faults = NULL,
#else
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b776d4f6ce4d..5ba3fa2fa50e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1268,4 +1268,14 @@ static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
+#ifdef CONFIG_HAVE_KVM_NO_POLL
+/* Callback that tells if we must not poll */
+bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
+#else
+static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+#endif /* CONFIG_HAVE_KVM_NO_POLL */
+
#endif
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 97bfd3b26dec..5a295971afd6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -523,6 +523,11 @@ static inline bool is_vmalloc_addr(const void *x)
return false;
#endif
}
+
+#ifndef is_ioremap_addr
+#define is_ioremap_addr(x) is_vmalloc_addr(x)
+#endif
+
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 87adec4fb8cb..988e3a7977d3 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -78,7 +78,7 @@ struct btf_type {
* is the 32 bits arrangement:
*/
#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24)
-#define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16)
+#define BTF_INT_OFFSET(VAL) (((VAL) & 0x00ff0000) >> 16)
#define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff)
/* Attributes stored in the BTF_INT_ENCODING */
diff --git a/include/uapi/linux/isst_if.h b/include/uapi/linux/isst_if.h
new file mode 100644
index 000000000000..d10b832c58c5
--- /dev/null
+++ b/include/uapi/linux/isst_if.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel Speed Select Interface: OS to hardware Interface
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#ifndef __ISST_IF_H
+#define __ISST_IF_H
+
+#include <linux/types.h>
+
+/**
+ * struct isst_if_platform_info - Define platform information
+ * @api_version: Version of the firmware document with which this driver
+ * can communicate
+ * @driver_version: Driver version, which helps the user send the right
+ * commands. Even if the firmware is capable, the driver
+ * may not be ready
+ * @max_cmds_per_ioctl: Returns the maximum number of commands the driver
+ * will accept in a single ioctl
+ * @mbox_supported: Support of mail box interface
+ * @mmio_supported: Support of mmio interface for core-power feature
+ *
+ * Used to return the output of IOCTL ISST_IF_GET_PLATFORM_INFO. User
+ * space can use this information to determine driver and firmware
+ * support, and the number of commands to send in a single IOCTL request.
+ */
+struct isst_if_platform_info {
+ __u16 api_version;
+ __u16 driver_version;
+ __u16 max_cmds_per_ioctl;
+ __u8 mbox_supported;
+ __u8 mmio_supported;
+};
+
+/**
+ * struct isst_if_cpu_map - CPU mapping between logical and physical CPU
+ * @logical_cpu: Linux logical CPU number
+ * @physical_cpu: PUNIT CPU number
+ *
+ * Used to convert from Linux logical CPU to the PUNIT CPU numbering scheme.
+ * The PUNIT CPU number is different from the APIC-ID-based CPU numbering.
+ */
+struct isst_if_cpu_map {
+ __u32 logical_cpu;
+ __u32 physical_cpu;
+};
+
+/**
+ * struct isst_if_cpu_maps - structure for CPU map IOCTL
+ * @cmd_count: Number of CPU mapping command in cpu_map[]
+ * @cpu_map[]: Holds one or more CPU map data structure
+ *
+ * This structure is used with ioctl ISST_IF_GET_PHY_ID to send
+ * one or more CPU mapping commands. Here the IOCTL return value indicates
+ * the number of commands sent, or an error number if no commands were sent.
+ */
+struct isst_if_cpu_maps {
+ __u32 cmd_count;
+ struct isst_if_cpu_map cpu_map[1];
+};
+
+/**
+ * struct isst_if_io_reg - Read write PUNIT IO register
+ * @read_write: Value 0: Read, 1: Write
+ * @logical_cpu: Logical CPU number to get target PCI device.
+ * @reg: PUNIT register offset
+ * @value: For a write operation, the value to write; for a read,
+ * a placeholder for the value read back
+ *
+ * Structure to specify read/write data to PUNIT registers.
+ */
+struct isst_if_io_reg {
+ __u32 read_write; /* Read:0, Write:1 */
+ __u32 logical_cpu;
+ __u32 reg;
+ __u32 value;
+};
+
+/**
+ * struct isst_if_io_regs - structure for IO register commands
+ * @req_count: Number of io reg commands in io_reg[]
+ * @io_reg[]: Holds one or more io_reg command structure
+ *
+ * This structure is used with ioctl ISST_IF_IO_CMD to send
+ * one or more read/write commands to the PUNIT. Here the IOCTL return
+ * value indicates the number of requests sent, or an error number if no
+ * requests were sent.
+ */
+struct isst_if_io_regs {
+ __u32 req_count;
+ struct isst_if_io_reg io_reg[1];
+};
+
+/**
+ * struct isst_if_mbox_cmd - Structure to define mail box command
+ * @logical_cpu: Logical CPU number to get target PCI device
+ * @parameter: Mailbox parameter value
+ * @req_data: Request data for the mailbox
+ * @resp_data: Response data for mailbox command response
+ * @command: Mailbox command value
+ * @sub_command: Mailbox sub command value
+ * @reserved: Unused, set to 0
+ *
+ * Structure to specify mailbox command to be sent to PUNIT.
+ */
+struct isst_if_mbox_cmd {
+ __u32 logical_cpu;
+ __u32 parameter;
+ __u32 req_data;
+ __u32 resp_data;
+ __u16 command;
+ __u16 sub_command;
+ __u32 reserved;
+};
+
+/**
+ * struct isst_if_mbox_cmds - structure for mailbox commands
+ * @cmd_count: Number of mailbox commands in mbox_cmd[]
+ * @mbox_cmd[]: Holds one or more mbox commands
+ *
+ * This structure is used with ioctl ISST_IF_MBOX_COMMAND to send
+ * one or more mailbox commands to the PUNIT. Here the IOCTL return
+ * value indicates the number of commands sent, or an error number if no
+ * commands were sent.
+ */
+struct isst_if_mbox_cmds {
+ __u32 cmd_count;
+ struct isst_if_mbox_cmd mbox_cmd[1];
+};
+
+/**
+ * struct isst_if_msr_cmd - Structure to define msr command
+ * @read_write: Value 0: Read, 1: Write
+ * @logical_cpu: Logical CPU number
+ * @msr: MSR number
+ * @data: For a write operation, the data to write; for a read,
+ * a placeholder
+ *
+ * Structure to specify MSR command related to PUNIT.
+ */
+struct isst_if_msr_cmd {
+ __u32 read_write; /* Read:0, Write:1 */
+ __u32 logical_cpu;
+ __u64 msr;
+ __u64 data;
+};
+
+/**
+ * struct isst_if_msr_cmds - structure for msr commands
+ * @cmd_count: Number of MSR commands in msr_cmd[]
+ * @msr_cmd[]: Holds one or more msr commands
+ *
+ * This structure is used with ioctl ISST_IF_MSR_COMMAND to send
+ * one or more MSR commands. The IOCTL return value indicates the number
+ * of commands sent, or an error number if no commands were sent.
+ */
+struct isst_if_msr_cmds {
+ __u32 cmd_count;
+ struct isst_if_msr_cmd msr_cmd[1];
+};
+
+#define ISST_IF_MAGIC 0xFE
+#define ISST_IF_GET_PLATFORM_INFO _IOR(ISST_IF_MAGIC, 0, struct isst_if_platform_info *)
+#define ISST_IF_GET_PHY_ID _IOWR(ISST_IF_MAGIC, 1, struct isst_if_cpu_map *)
+#define ISST_IF_IO_CMD _IOW(ISST_IF_MAGIC, 2, struct isst_if_io_regs *)
+#define ISST_IF_MBOX_COMMAND _IOWR(ISST_IF_MAGIC, 3, struct isst_if_mbox_cmds *)
+#define ISST_IF_MSR_COMMAND _IOWR(ISST_IF_MAGIC, 4, struct isst_if_msr_cmds *)
+#endif
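
The header above only defines the ioctl ABI; exercising it needs the character device registered by the driver. A minimal user-space probe might look like the sketch below, where the node name /dev/isst_interface is an assumption (it comes from the driver, not from this header):

/* Sketch: probe the new ABI with ISST_IF_GET_PLATFORM_INFO. The
 * device node name is assumed, not defined by the uapi header. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/isst_if.h>

int main(void)
{
    struct isst_if_platform_info info;
    int fd = open("/dev/isst_interface", O_RDONLY);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (ioctl(fd, ISST_IF_GET_PLATFORM_INFO, &info) < 0) {
        perror("ISST_IF_GET_PLATFORM_INFO");
        close(fd);
        return 1;
    }
    printf("api %u driver %u max_cmds %u mbox %u mmio %u\n",
           info.api_version, info.driver_version,
           info.max_cmds_per_ioctl, info.mbox_supported,
           info.mmio_supported);
    close(fd);
    return 0;
}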
diff --git a/init/init_task.c b/init/init_task.c
index 66787e30a419..5cf3cf79d983 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/numa.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 3de085910b6c..28a13ee05574 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -20,6 +20,7 @@
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
+#include <linux/numa.h>
#include <trace/events/sched.h>
static DEFINE_SPINLOCK(kthread_create_lock);
@@ -660,7 +661,7 @@ __kthread_create_worker(int cpu, unsigned int flags,
{
struct kthread_worker *worker;
struct task_struct *task;
- int node = -1;
+ int node = NUMA_NO_NODE;
worker = kzalloc(sizeof(*worker), GFP_KERNEL);
if (!worker)
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 9e9540f98c5b..b3fc3cd4ee35 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -264,7 +264,6 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
trace.max_entries = MAX_STACK_ENTRIES;
trace.entries = entries;
ret = save_stack_trace_tsk_reliable(task, &trace);
- WARN_ON_ONCE(ret == -ENOSYS);
if (ret) {
snprintf(err_buf, STACK_ERR_BUF_SIZE,
"%s: %s:%d has an unreliable stack\n",
@@ -297,11 +296,11 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
*/
static bool klp_try_switch_task(struct task_struct *task)
{
+ static char err_buf[STACK_ERR_BUF_SIZE];
struct rq *rq;
struct rq_flags flags;
int ret;
bool success = false;
- char err_buf[STACK_ERR_BUF_SIZE];
err_buf[0] = '\0';
@@ -310,6 +309,13 @@ static bool klp_try_switch_task(struct task_struct *task)
return true;
/*
+ * For arches which don't have reliable stack traces, we have to rely
+ * on other methods (e.g., switching tasks at kernel exit).
+ */
+ if (!klp_have_reliable_stack())
+ return false;
+
+ /*
* Now try to check the stack for any to-be-patched or to-be-unpatched
* functions. If all goes well, switch the task to the target patch
* state.
@@ -344,7 +350,6 @@ done:
pr_debug("%s", err_buf);
return success;
-
}
/*
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 8ec688fdb8ee..97baac4acdf9 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -138,7 +138,7 @@ EXPORT_SYMBOL(memremap);
void memunmap(void *addr)
{
- if (is_vmalloc_addr(addr))
+ if (is_ioremap_addr(addr))
iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c2a5e72c759f..f0c001ca0df4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1174,7 +1174,7 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
/* New address space, reset the preferred nid */
if (!(clone_flags & CLONE_VM)) {
- p->numa_preferred_nid = -1;
+ p->numa_preferred_nid = NUMA_NO_NODE;
return;
}
@@ -1194,13 +1194,13 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
- rq->nr_numa_running += (p->numa_preferred_nid != -1);
+ rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}
static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
- rq->nr_numa_running -= (p->numa_preferred_nid != -1);
+ rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}
@@ -1414,7 +1414,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
* two full passes of the "multi-stage node selection" test that is
* executed below.
*/
- if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) &&
+ if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
(cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
return true;
@@ -1863,7 +1863,7 @@ static void numa_migrate_preferred(struct task_struct *p)
unsigned long interval = HZ;
/* This task has no NUMA fault statistics yet */
- if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
+ if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
return;
/* Periodically retry migrating the task to the preferred node */
@@ -2110,7 +2110,7 @@ static int preferred_group_nid(struct task_struct *p, int nid)
static void task_numa_placement(struct task_struct *p)
{
- int seq, nid, max_nid = -1;
+ int seq, nid, max_nid = NUMA_NO_NODE;
unsigned long max_faults = 0;
unsigned long fault_types[2] = { 0, 0 };
unsigned long total_faults;
@@ -2653,7 +2653,8 @@ static void update_scan_period(struct task_struct *p, int new_cpu)
* the preferred node.
*/
if (dst_nid == p->numa_preferred_nid ||
- (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid))
+ (p->numa_preferred_nid != NUMA_NO_NODE &&
+ src_nid != p->numa_preferred_nid))
return;
}
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 9ea4b61d9350..19c073767314 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -343,8 +343,6 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
.arg4_type = ARG_CONST_SIZE,
};
-static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
-
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
u64 flags, struct perf_sample_data *sd)
@@ -376,24 +374,50 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
return 0;
}
+/*
+ * Support executing tracepoints in normal, irq, and nmi context, each of
+ * which can call bpf_perf_event_output
+ */
+struct bpf_trace_sample_data {
+ struct perf_sample_data sds[3];
+};
+
+static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
+static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
u64, flags, void *, data, u64, size)
{
- struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
+ struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
+ int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
struct perf_raw_record raw = {
.frag = {
.size = size,
.data = data,
},
};
+ struct perf_sample_data *sd;
+ int err;
- if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
- return -EINVAL;
+ if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ sd = &sds->sds[nest_level - 1];
+
+ if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
+ err = -EINVAL;
+ goto out;
+ }
perf_sample_data_init(sd, 0, 0);
sd->raw = &raw;
- return __bpf_perf_event_output(regs, map, flags, sd);
+ err = __bpf_perf_event_output(regs, map, flags, sd);
+
+out:
+ this_cpu_dec(bpf_trace_nest_level);
+ return err;
}
static const struct bpf_func_proto bpf_perf_event_output_proto = {
@@ -746,16 +770,48 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
/*
* bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
* to avoid potential recursive reuse issue when/if tracepoints are added
- * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack
+ * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
+ *
+ * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
+ * in normal, irq, and nmi context.
*/
-static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
+struct bpf_raw_tp_regs {
+ struct pt_regs regs[3];
+};
+static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
+static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
+static struct pt_regs *get_bpf_raw_tp_regs(void)
+{
+ struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
+ int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
+
+ if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
+ this_cpu_dec(bpf_raw_tp_nest_level);
+ return ERR_PTR(-EBUSY);
+ }
+
+ return &tp_regs->regs[nest_level - 1];
+}
+
+static void put_bpf_raw_tp_regs(void)
+{
+ this_cpu_dec(bpf_raw_tp_nest_level);
+}
+
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
- struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+ struct pt_regs *regs = get_bpf_raw_tp_regs();
+ int ret;
+
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
perf_fetch_caller_regs(regs);
- return ____bpf_perf_event_output(regs, map, flags, data, size);
+ ret = ____bpf_perf_event_output(regs, map, flags, data, size);
+
+ put_bpf_raw_tp_regs();
+ return ret;
}
static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
@@ -772,12 +828,18 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
struct bpf_map *, map, u64, flags)
{
- struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+ struct pt_regs *regs = get_bpf_raw_tp_regs();
+ int ret;
+
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
perf_fetch_caller_regs(regs);
/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
- return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
- flags, 0, 0);
+ ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
+ flags, 0, 0);
+ put_bpf_raw_tp_regs();
+ return ret;
}
static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
@@ -792,11 +854,17 @@ static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
void *, buf, u32, size, u64, flags)
{
- struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+ struct pt_regs *regs = get_bpf_raw_tp_regs();
+ int ret;
+
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
perf_fetch_caller_regs(regs);
- return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
- (unsigned long) size, flags, 0);
+ ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
+ (unsigned long) size, flags, 0);
+ put_bpf_raw_tp_regs();
+ return ret;
}
static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
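
Both halves of the bpf_trace.c change use the same recursion guard: a per-CPU array of three scratch slots (process, irq, and nmi context) indexed by a per-CPU nesting counter; this_cpu_inc_return() picks a slot, anything nesting deeper than the array fails with -EBUSY, and the counter is decremented on the way out. A single-threaded user-space sketch of the slot discipline (per-CPU variables become plain globals, names illustrative):

/* User-space sketch of the nest-level slot discipline from
 * bpf_perf_event_output(); one "CPU", so globals replace per-CPU. */
#include <stdio.h>

#define NR_CTX 3    /* process, irq, nmi */

static int scratch[NR_CTX];
static int nest_level;

static int *get_slot(void)
{
    int level = ++nest_level;   /* this_cpu_inc_return() */

    if (level > NR_CTX) {       /* deeper than we planned for */
        nest_level--;
        return NULL;            /* -EBUSY in the kernel */
    }
    return &scratch[level - 1];
}

static void put_slot(void)
{
    nest_level--;               /* this_cpu_dec() */
}

/* Re-entering while a slot is held takes the next slot, exactly as an
 * irq hitting the tracepoint mid-update would on one CPU. */
static void nested_use(int depth)
{
    int *slot = get_slot();

    if (!slot) {
        printf("depth %d: busy\n", depth);
        return;
    }
    *slot = depth;
    if (depth < 4)
        nested_use(depth + 1);
    put_slot();
}

int main(void)
{
    nested_use(1);  /* depths 1..3 get slots, depth 4 reports busy */
    return 0;
}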
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 113e8bf66fe8..d99cb0a2ebab 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -606,7 +606,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
/* if no digit is after '-', it's wrong*/
if (at_start && in_range)
return -EINVAL;
- if (!(a <= b) || !(used_size <= group_size))
+ if (!(a <= b) || group_size == 0 || !(used_size <= group_size))
return -EINVAL;
if (b >= nmaskbits)
return -ERANGE;
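
The extra group_size == 0 test matters because of the extended list format the parser accepts, start-end:used/group, which takes the first "used" bits out of every "group" bits in the range; a zero group size previously slipped past the checks and stalled the expansion loop, which advances by group_size each step. A small demo of the expansion and of the rejected case (expand() is illustrative, not the kernel function):

/* Demo of the start-end:used/group expansion that __bitmap_parselist()
 * performs; the loop advances by 'group' each step, which is why
 * group == 0 must be rejected up front. */
#include <stdio.h>

static int expand(unsigned int a, unsigned int b,
                  unsigned int used, unsigned int group)
{
    unsigned int bit;

    if (a > b || group == 0 || used > group)    /* the fixed-up check */
        return -1;                              /* -EINVAL */
    for (; a <= b; a += group)                  /* would stall if group == 0 */
        for (bit = a; bit < a + used && bit <= b; bit++)
            printf("%u ", bit);
    printf("\n");
    return 0;
}

int main(void)
{
    expand(0, 31, 4, 8);    /* "0-31:4/8": bits 0-3, 8-11, 16-19, 24-27 */
    return expand(0, 4, 1, 0) == -1 ? 0 : 1;    /* "0-4:1/0" now rejected */
}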
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 2fa654bec3ba..cb1e4162c7ed 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -4,6 +4,7 @@
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/bootmem.h>
+#include <linux/numa.h>
/**
* cpumask_next_and - get the next cpu in *src1p & *src2p
@@ -189,7 +190,7 @@ unsigned int cpumask_local_spread(unsigned int i, int node)
/* Wrap: we always want a cpu. */
i %= num_online_cpus();
- if (node == -1) {
+ if (node == NUMA_NO_NODE) {
for_each_cpu(cpu, cpu_online_mask)
if (i-- == 0)
return cpu;
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 06dad7a072fd..2043c39c9c1e 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -657,17 +657,18 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
if (!miter->__remaining) {
struct scatterlist *sg;
- unsigned long pgoffset;
if (!__sg_page_iter_next(&miter->piter))
return false;
sg = miter->piter.sg;
- pgoffset = miter->piter.sg_pgoffset;
- miter->__offset = pgoffset ? 0 : sg->offset;
+ miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
+ miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
+ miter->__offset &= PAGE_SIZE - 1;
miter->__remaining = sg->offset + sg->length -
- (pgoffset << PAGE_SHIFT) - miter->__offset;
+ (miter->piter.sg_pgoffset << PAGE_SHIFT) -
+ miter->__offset;
miter->__remaining = min_t(unsigned long, miter->__remaining,
PAGE_SIZE - miter->__offset);
}
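The rewrite folds any whole pages hidden in sg->offset into the page iterator's sg_pgoffset and keeps only the in-page remainder in __offset, so an sg entry whose offset exceeds PAGE_SIZE maps the correct page. A worked example of the arithmetic, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long sg_offset = 5000;     /* > PAGE_SIZE: crosses a page */
        unsigned long pgoffset = 0;         /* first page of this sg entry */

        unsigned long off = pgoffset ? 0 : sg_offset;
        pgoffset += off >> PAGE_SHIFT;      /* 5000 >> 12 == 1 extra page */
        off &= PAGE_SIZE - 1;               /* 5000 & 4095 == 904 */

        printf("page index %lu, in-page offset %lu\n", pgoffset, off);
        return 0;
    }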
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 29463467900b..b0bd68791d49 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -33,6 +33,7 @@
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
+#include <linux/numa.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
@@ -1462,7 +1463,7 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
struct anon_vma *anon_vma = NULL;
struct page *page;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
- int page_nid = -1, this_nid = numa_node_id();
+ int page_nid = NUMA_NO_NODE, this_nid = numa_node_id();
int target_nid, last_cpupid = -1;
bool page_locked;
bool migrated = false;
@@ -1507,7 +1508,7 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
*/
page_locked = trylock_page(page);
target_nid = mpol_misplaced(page, vma, haddr);
- if (target_nid == -1) {
+ if (target_nid == NUMA_NO_NODE) {
/* If the page was locked, there are no parallel migrations */
if (page_locked)
goto clear_pmdnuma;
@@ -1515,7 +1516,7 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
/* Migration could have started since the pmd_trans_migrating check */
if (!page_locked) {
- page_nid = -1;
+ page_nid = NUMA_NO_NODE;
if (!get_page_unless_zero(page))
goto out_unlock;
spin_unlock(vmf->ptl);
@@ -1536,14 +1537,14 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
unlock_page(page);
put_page(page);
- page_nid = -1;
+ page_nid = NUMA_NO_NODE;
goto out_unlock;
}
/* Bail if we fail to protect against THP splits for any reason */
if (unlikely(!anon_vma)) {
put_page(page);
- page_nid = -1;
+ page_nid = NUMA_NO_NODE;
goto clear_pmdnuma;
}
@@ -1585,7 +1586,7 @@ out:
if (anon_vma)
page_unlock_anon_vma_read(anon_vma);
- if (page_nid != -1)
+ if (page_nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
flags);
diff --git a/mm/memory.c b/mm/memory.c
index a62006774511..6feabbd0cf19 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -70,6 +70,7 @@
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
+#include <linux/numa.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
@@ -3728,7 +3729,7 @@ static int do_numa_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *page = NULL;
- int page_nid = -1;
+ int page_nid = NUMA_NO_NODE;
int last_cpupid;
int target_nid;
bool migrated = false;
@@ -3795,7 +3796,7 @@ static int do_numa_page(struct vm_fault *vmf)
target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
&flags);
pte_unmap_unlock(vmf->pte, vmf->ptl);
- if (target_nid == -1) {
+ if (target_nid == NUMA_NO_NODE) {
put_page(page);
goto out;
}
@@ -3809,7 +3810,7 @@ static int do_numa_page(struct vm_fault *vmf)
flags |= TNF_MIGRATE_FAIL;
out:
- if (page_nid != -1)
+ if (page_nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, page_nid, 1, flags);
return 0;
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 743a818f285f..2fa1d1c822e5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -829,7 +829,7 @@ static void node_states_check_changes_online(unsigned long nr_pages,
if (!node_state(nid, N_MEMORY))
arg->status_change_nid = nid;
else
- arg->status_change_nid = -1;
+ arg->status_change_nid = NUMA_NO_NODE;
}
static void node_states_set_node(int node, struct memory_notify *arg)
@@ -1735,7 +1735,7 @@ static void node_states_check_changes_offline(unsigned long nr_pages,
if (nr_pages >= present_pages)
arg->status_change_nid = zone_to_nid(zone);
else
- arg->status_change_nid = -1;
+ arg->status_change_nid = NUMA_NO_NODE;
}
static void node_states_clear_node(int node, struct memory_notify *arg)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 988879708295..9bdf474a13d5 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2298,7 +2298,7 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
unsigned long pgoff;
int thiscpu = raw_smp_processor_id();
int thisnid = cpu_to_node(thiscpu);
- int polnid = -1;
+ int polnid = NUMA_NO_NODE;
int ret = -1;
BUG_ON(!vma);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1814139e809f..cd00953b3dc9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5905,7 +5905,7 @@ int __meminit __early_pfn_to_nid(unsigned long pfn,
return state->last_nid;
nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
- if (nid != -1) {
+ if (nid != NUMA_NO_NODE) {
state->last_start = start_pfn;
state->last_end = end_pfn;
state->last_nid = nid;
@@ -6544,7 +6544,7 @@ unsigned long __init node_map_pfn_alignment(void)
{
unsigned long accl_mask = 0, last_end = 0;
unsigned long start, end, mask;
- int last_nid = -1;
+ int last_nid = NUMA_NO_NODE;
int i, nid;
for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
@@ -7702,11 +7702,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
* handle each tail page individually in migration.
*/
if (PageHuge(page)) {
+ struct page *head = compound_head(page);
+ unsigned int skip_pages;
- if (!hugepage_migration_supported(page_hstate(page)))
+ if (!hugepage_migration_supported(page_hstate(head)))
goto unmovable;
- iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
+ skip_pages = (1 << compound_order(head)) - (page - head);
+ iter += skip_pages - 1;
continue;
}
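When the scan enters a hugetlb page at a tail page, the old round_up() was computed from the tail page, whose compound metadata is not meaningful; the fix derives the order from the head page and skips only the pages that actually remain. A worked example of the skip arithmetic (the 512-page order is illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int order = 9;                   /* compound_order(head) */
        unsigned long head_pfn = 0x1000;
        unsigned long page_pfn = head_pfn + 100;  /* entered mid-hugepage */
        unsigned long iter = 100;

        unsigned int skip_pages = (1u << order) - (page_pfn - head_pfn);
        iter += skip_pages - 1;                   /* loop's ++ lands past the page */

        printf("resume at iter %lu (== %u)\n", iter + 1, 1u << order);
        return 0;
    }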
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 9dbabbfc4557..d5ed3dce8dc3 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -300,7 +300,7 @@ static int __meminit online_page_ext(unsigned long start_pfn,
start = SECTION_ALIGN_DOWN(start_pfn);
end = SECTION_ALIGN_UP(start_pfn + nr_pages);
- if (nid == -1) {
+ if (nid == NUMA_NO_NODE) {
/*
* In this case, "nid" already exists and contains valid memory.
* "start_pfn" passed to us is a pfn which is an arg for
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index e454bd29671e..cf95de7e1489 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -3793,6 +3793,8 @@ static void batadv_tt_purge(struct work_struct *work)
void batadv_tt_free(struct batadv_priv *bat_priv)
{
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_ROAM, 1);
+
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 95b2db0362d1..e2107323b190 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -518,13 +518,15 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
call_netdevice_notifiers(NETDEV_JOIN, dev);
err = dev_set_allmulti(dev, 1);
- if (err)
- goto put_back;
+ if (err) {
+ kfree(p); /* kobject not yet initialized; free directly */
+ goto err1;
+ }
err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
SYSFS_BRIDGE_PORT_ATTR);
if (err)
- goto err1;
+ goto err2;
err = br_sysfs_addif(p);
if (err)
@@ -607,12 +609,9 @@ err3:
sysfs_remove_link(br->ifobj, p->dev->name);
err2:
kobject_put(&p->kobj);
- p = NULL; /* kobject_put frees */
-err1:
dev_set_allmulti(dev, -1);
-put_back:
+err1:
dev_put(dev);
- kfree(p);
return err;
}
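The reordered error path follows the kobject ownership rule: before kobject_init_and_add() has run, the port may be kfree()d directly, but once the kobject is initialized it must be released through kobject_put(), which invokes the ktype release function and frees the object. A rough user-space analogue of that rule, with a plain refcount standing in for the kobject (illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    struct port { int refs; };

    static void put_port(struct port *p)
    {
        if (--p->refs == 0) {   /* release() path, as kobject_put() would take */
            puts("release frees port");
            free(p);
        }
    }

    int main(void)
    {
        struct port *p = malloc(sizeof(*p));
        int early_err = 1;

        if (!p)
            return 1;
        if (early_err) {
            free(p);            /* before init: plain free is the only option */
            return 1;
        }
        p->refs = 1;            /* "init_and_add" succeeded conceptually */
        put_port(p);            /* after init: only put() may free it */
        return 0;
    }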
diff --git a/net/core/dev.c b/net/core/dev.c
index fd3345e550e1..5288b3397c50 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5667,7 +5667,6 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
skb_reset_mac_header(skb);
skb_gro_reset_offset(skb);
- eth = skb_gro_header_fast(skb, 0);
if (unlikely(skb_gro_header_hard(skb, hlen))) {
eth = skb_gro_header_slow(skb, hlen, 0);
if (unlikely(!eth)) {
@@ -5677,6 +5676,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
return NULL;
}
} else {
+ eth = (const struct ethhdr *)skb->data;
gro_pull_from_frag0(skb, hlen);
NAPI_GRO_CB(skb)->frag0 += hlen;
NAPI_GRO_CB(skb)->frag0_len -= hlen;
@@ -8602,7 +8602,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
refcnt = netdev_refcnt_read(dev);
- if (time_after(jiffies, warning_time + 10 * HZ)) {
+ if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
dev->name, refcnt);
warning_time = jiffies;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 563923af8884..e2f537867a39 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -908,8 +908,13 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
if (rc >= 0)
info.n_priv_flags = rc;
}
- if (ops->get_regs_len)
- info.regdump_len = ops->get_regs_len(dev);
+ if (ops->get_regs_len) {
+ int ret = ops->get_regs_len(dev);
+
+ if (ret > 0)
+ info.regdump_len = ret;
+ }
+
if (ops->get_eeprom_len)
info.eedump_len = ops->get_eeprom_len(dev);
@@ -1447,6 +1452,9 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
return -EFAULT;
reglen = ops->get_regs_len(dev);
+ if (reglen <= 0)
+ return reglen;
+
if (regs.len > reglen)
regs.len = reglen;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 3b2034f6d49d..618e1e816ea1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -158,6 +158,7 @@
#include <linux/etherdevice.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
+#include <linux/mmzone.h>
#include <net/net_namespace.h>
#include <net/checksum.h>
#include <net/ipv6.h>
@@ -3149,7 +3150,13 @@ static int pktgen_wait_thread_run(struct pktgen_thread *t)
{
while (thread_is_running(t)) {
+ /* note: 't' will still be around even after the unlock/lock
+ * cycle because pktgen_thread threads are only cleared at
+ * net exit
+ */
+ mutex_unlock(&pktgen_thread_lock);
msleep_interruptible(100);
+ mutex_lock(&pktgen_thread_lock);
if (signal_pending(current))
goto signal;
@@ -3164,6 +3171,10 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
struct pktgen_thread *t;
int sig = 1;
+ /* prevent from racing with rmmod */
+ if (!try_module_get(THIS_MODULE))
+ return sig;
+
mutex_lock(&pktgen_thread_lock);
list_for_each_entry(t, &pn->pktgen_threads, th_list) {
@@ -3177,6 +3188,7 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
t->control |= (T_STOP);
mutex_unlock(&pktgen_thread_lock);
+ module_put(THIS_MODULE);
return sig;
}
@@ -3708,7 +3720,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->svlan_cfi = 0;
pkt_dev->svlan_id = 0xffff;
pkt_dev->burst = 1;
- pkt_dev->node = -1;
+ pkt_dev->node = NUMA_NO_NODE;
err = pktgen_setup_dev(t->net, pkt_dev, ifname);
if (err)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5f7c1a008926..352bcd5a2619 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1398,6 +1398,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
struct nlattr *af_spec;
struct rtnl_af_ops *af_ops;
struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
+ bool put_iflink = false;
ASSERT_RTNL();
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
@@ -1426,8 +1427,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
#ifdef CONFIG_RPS
nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
- (dev->ifindex != dev_get_iflink(dev) &&
- nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
(upper_dev &&
nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
@@ -1507,9 +1506,15 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
goto nla_put_failure;
+
+ put_iflink = true;
}
}
+ if ((put_iflink || dev->ifindex != dev_get_iflink(dev)) &&
+ nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))
+ goto nla_put_failure;
+
if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
goto nla_put_failure;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6afb20af0f93..64763ba13a23 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -190,6 +190,17 @@ static void ip_ma_put(struct ip_mc_list *im)
pmc != NULL; \
pmc = rtnl_dereference(pmc->next_rcu))
+static void ip_sf_list_clear_all(struct ip_sf_list *psf)
+{
+ struct ip_sf_list *next;
+
+ while (psf) {
+ next = psf->sf_next;
+ kfree(psf);
+ psf = next;
+ }
+}
+
#ifdef CONFIG_IP_MULTICAST
/*
@@ -635,6 +646,13 @@ static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
}
}
+static void kfree_pmc(struct ip_mc_list *pmc)
+{
+ ip_sf_list_clear_all(pmc->sources);
+ ip_sf_list_clear_all(pmc->tomb);
+ kfree(pmc);
+}
+
static void igmpv3_send_cr(struct in_device *in_dev)
{
struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
@@ -671,7 +689,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
else
in_dev->mc_tomb = pmc_next;
in_dev_put(pmc->interface);
- kfree(pmc);
+ kfree_pmc(pmc);
} else
pmc_prev = pmc;
}
@@ -1203,12 +1221,16 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
im->sfmode = pmc->sfmode;
if (pmc->sfmode == MCAST_INCLUDE) {
im->tomb = pmc->tomb;
+ pmc->tomb = NULL;
+
im->sources = pmc->sources;
+ pmc->sources = NULL;
+
for (psf = im->sources; psf; psf = psf->sf_next)
psf->sf_crcount = im->crcount;
}
in_dev_put(pmc->interface);
- kfree(pmc);
+ kfree_pmc(pmc);
}
spin_unlock_bh(&im->lock);
}
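Clearing pmc->tomb and pmc->sources after handing the lists to im is what prevents a double free: kfree_pmc() walks and frees whatever the pmc still points at, so a transferred list must be detached from the donor first. A self-contained sketch of the transfer-then-NULL pattern:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; };
    struct holder { struct node *list; };

    static void clear_list(struct node *n)
    {
        while (n) {
            struct node *next = n->next;
            free(n);
            n = next;
        }
    }

    static void free_holder(struct holder *h)
    {
        clear_list(h->list);    /* frees whatever the holder still owns */
        free(h);
    }

    int main(void)
    {
        struct holder *src = calloc(1, sizeof(*src));
        struct holder *dst = calloc(1, sizeof(*dst));

        src->list = calloc(1, sizeof(struct node));

        /* transfer ownership: NULL the source pointer so freeing the
         * source holder cannot also free the list now owned by dst */
        dst->list = src->list;
        src->list = NULL;

        free_holder(src);       /* safe: owns nothing now */
        free_holder(dst);
        return 0;
    }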
@@ -1229,21 +1251,18 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
nextpmc = pmc->next;
ip_mc_clear_src(pmc);
in_dev_put(pmc->interface);
- kfree(pmc);
+ kfree_pmc(pmc);
}
/* clear dead sources, too */
rcu_read_lock();
for_each_pmc_rcu(in_dev, pmc) {
- struct ip_sf_list *psf, *psf_next;
+ struct ip_sf_list *psf;
spin_lock_bh(&pmc->lock);
psf = pmc->tomb;
pmc->tomb = NULL;
spin_unlock_bh(&pmc->lock);
- for (; psf; psf = psf_next) {
- psf_next = psf->sf_next;
- kfree(psf);
- }
+ ip_sf_list_clear_all(psf);
}
rcu_read_unlock();
}
@@ -2107,7 +2126,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
- struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
+ struct ip_sf_list *tomb, *sources;
spin_lock_bh(&pmc->lock);
tomb = pmc->tomb;
@@ -2119,14 +2138,8 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
pmc->sfcount[MCAST_EXCLUDE] = 1;
spin_unlock_bh(&pmc->lock);
- for (psf = tomb; psf; psf = nextpsf) {
- nextpsf = psf->sf_next;
- kfree(psf);
- }
- for (psf = sources; psf; psf = nextpsf) {
- nextpsf = psf->sf_next;
- kfree(psf);
- }
+ ip_sf_list_clear_all(tomb);
+ ip_sf_list_clear_all(sources);
}
/* Join a multicast group
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 94fe0b5213a5..eb5f3c178140 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -171,6 +171,7 @@ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
*/
static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
{
+ int dif = inet_iif(skb);
struct sock *sk;
struct hlist_head *head;
int delivered = 0;
@@ -183,8 +184,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
net = dev_net(skb->dev);
sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol,
- iph->saddr, iph->daddr,
- skb->dev->ifindex);
+ iph->saddr, iph->daddr, dif);
while (sk) {
delivered = 1;
@@ -199,7 +199,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
}
sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol,
iph->saddr, iph->daddr,
- skb->dev->ifindex);
+ dif);
}
out:
read_unlock(&raw_v4_hashinfo.lock);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a5e5cf600f74..4cf9d5d3ee38 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2844,9 +2844,9 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
(tcp_fackets_out(tp) > tp->reordering));
- if (WARN_ON(!tp->packets_out && tp->sacked_out))
+ if (!tp->packets_out && tp->sacked_out)
tp->sacked_out = 0;
- if (WARN_ON(!tp->sacked_out && tp->fackets_out))
+ if (!tp->sacked_out && tp->fackets_out)
tp->fackets_out = 0;
/* Now state machine starts.
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 676338c5a28b..dc7e2bfe47bf 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -285,7 +285,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
/* Binding to link-local address requires an interface */
if (!sk->sk_bound_dev_if)
goto out_unlock;
+ }
+ if (sk->sk_bound_dev_if) {
err = -ENODEV;
dev = dev_get_by_index_rcu(sock_net(sk),
sk->sk_bound_dev_if);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 80cf460db8f9..642c9f0316fc 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1073,7 +1073,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
if (!tdev && tunnel->parms.link)
tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
- if (tdev) {
+ if (tdev && !netif_is_l3_master(tdev)) {
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index 94425e421213..9e4b6bcf6920 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -72,6 +72,8 @@ int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
rc = llc_mac_hdr_init(skb, skb->dev->dev_addr, dmac);
if (likely(!rc))
rc = dev_queue_xmit(skb);
+ else
+ kfree_skb(skb);
return rc;
}
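dev_queue_xmit() consumes the skb whether or not transmission succeeds, so the only leaky path was the one where llc_mac_hdr_init() fails and the skb is never handed off; the added kfree_skb() plugs it. A user-space sketch of the consume-on-handoff convention:

    #include <stdio.h>
    #include <stdlib.h>

    /* transmit() consumes the buffer on every outcome, like dev_queue_xmit() */
    static int transmit(void *buf) { free(buf); return 0; }

    static int build_and_send(int hdr_ok)
    {
        void *buf = malloc(64);
        if (!buf)
            return -1;
        if (hdr_ok)
            return transmit(buf);   /* ownership passed on */
        free(buf);                  /* header build failed: free locally */
        return -1;
    }

    int main(void)
    {
        build_and_send(1);
        build_and_send(0);          /* without the local free this path leaks */
        return 0;
    }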
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 35c6dfa13fa8..cfd30671ccdf 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1410,7 +1410,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (WARN_ON(!chanctx_conf)) {
+ if (WARN_ON_ONCE(!chanctx_conf)) {
rcu_read_unlock();
return NULL;
}
@@ -1999,6 +1999,13 @@ void __ieee80211_flush_queues(struct ieee80211_local *local,
static inline bool ieee80211_can_run_worker(struct ieee80211_local *local)
{
/*
+ * It's unsafe to try to do any work during reconfigure flow.
+ * When the flow ends the work will be requeued.
+ */
+ if (local->in_reconfig)
+ return false;
+
+ /*
* If quiescing is set, we are racing with __ieee80211_suspend.
* __ieee80211_suspend flushes the workers after setting quiescing,
* and we check quiescing / suspended before enqueing new workers.
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index d51da26e9c18..3162f955f3ae 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -923,6 +923,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
/* flush STAs and mpaths on this iface */
sta_info_flush(sdata);
+ ieee80211_free_keys(sdata, true);
mesh_path_flush_by_iface(sdata);
/* stop the beacon */
@@ -1212,7 +1213,8 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
ifmsh->chsw_ttl = 0;
/* Remove the CSA and MCSP elements from the beacon */
- tmp_csa_settings = rcu_dereference(ifmsh->csa);
+ tmp_csa_settings = rcu_dereference_protected(ifmsh->csa,
+ lockdep_is_held(&sdata->wdev.mtx));
RCU_INIT_POINTER(ifmsh->csa, NULL);
if (tmp_csa_settings)
kfree_rcu(tmp_csa_settings, rcu_head);
@@ -1234,6 +1236,8 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
struct mesh_csa_settings *tmp_csa_settings;
int ret = 0;
+ lockdep_assert_held(&sdata->wdev.mtx);
+
tmp_csa_settings = kmalloc(sizeof(*tmp_csa_settings),
GFP_ATOMIC);
if (!tmp_csa_settings)
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index a9edd7df3b65..6a5445cf227b 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2223,6 +2223,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
mutex_lock(&local->mtx);
ieee80211_start_next_roc(local);
mutex_unlock(&local->mtx);
+
+ /* Requeue all works */
+ list_for_each_entry(sdata, &local->interfaces, list)
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e0560c41e371..570c5353cb85 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -934,19 +934,22 @@ static unsigned int early_drop_list(struct net *net,
return drops;
}
-static noinline int early_drop(struct net *net, unsigned int _hash)
+static noinline int early_drop(struct net *net, unsigned int hash)
{
- unsigned int i;
+ unsigned int i, bucket;
for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
struct hlist_nulls_head *ct_hash;
- unsigned int hash, hsize, drops;
+ unsigned int hsize, drops;
rcu_read_lock();
nf_conntrack_get_ht(&ct_hash, &hsize);
- hash = reciprocal_scale(_hash++, hsize);
+ if (!i)
+ bucket = reciprocal_scale(hash, hsize);
+ else
+ bucket = (bucket + 1) % hsize;
- drops = early_drop_list(net, &ct_hash[hash]);
+ drops = early_drop_list(net, &ct_hash[bucket]);
rcu_read_unlock();
if (drops) {
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index baf0faad8f86..92b299e039fe 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3360,20 +3360,29 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
sock_recv_ts_and_drops(msg, sk, skb);
if (msg->msg_name) {
+ int copy_len;
+
/* If the address length field is there to be filled
* in, we fill it in now.
*/
if (sock->type == SOCK_PACKET) {
__sockaddr_check_size(sizeof(struct sockaddr_pkt));
msg->msg_namelen = sizeof(struct sockaddr_pkt);
+ copy_len = msg->msg_namelen;
} else {
struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
msg->msg_namelen = sll->sll_halen +
offsetof(struct sockaddr_ll, sll_addr);
+ copy_len = msg->msg_namelen;
+ if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
+ memset(msg->msg_name +
+ offsetof(struct sockaddr_ll, sll_addr),
+ 0, sizeof(sll->sll_addr));
+ msg->msg_namelen = sizeof(struct sockaddr_ll);
+ }
}
- memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
- msg->msg_namelen);
+ memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
}
if (pkt_sk(sk)->auxdata) {
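The new copy_len logic copies only the bytes that are actually valid for the address in question and zero-fills the rest of the sockaddr area before reporting the full structure size, so no uninitialized or out-of-range bytes reach the user buffer. A stand-alone sketch of that pattern; the sizes are illustrative:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char kernel_sa[12] = "abcdefgh";    /* 8 valid bytes, rest stale */
        char user_name[12];
        size_t copy_len = 8;                /* halen-derived valid length */

        memcpy(user_name, kernel_sa, copy_len);
        memset(user_name + copy_len, 0,     /* pad instead of leaking bytes */
               sizeof(user_name) - copy_len);

        printf("%s\n", user_name);          /* prints only the valid bytes */
        return 0;
    }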
@@ -4315,7 +4324,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
req3->tp_sizeof_priv ||
req3->tp_feature_req_word) {
err = -EINVAL;
- goto out;
+ goto out_free_pg_vec;
}
}
break;
@@ -4379,6 +4388,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
prb_shutdown_retire_blk_timer(po, rb_queue);
}
+out_free_pg_vec:
if (pg_vec)
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
@@ -4604,14 +4614,29 @@ static void __exit packet_exit(void)
static int __init packet_init(void)
{
- int rc = proto_register(&packet_proto, 0);
+ int rc;
- if (rc != 0)
+ rc = proto_register(&packet_proto, 0);
+ if (rc)
goto out;
+ rc = sock_register(&packet_family_ops);
+ if (rc)
+ goto out_proto;
+ rc = register_pernet_subsys(&packet_net_ops);
+ if (rc)
+ goto out_sock;
+ rc = register_netdevice_notifier(&packet_netdev_notifier);
+ if (rc)
+ goto out_pernet;
+
+ return 0;
- sock_register(&packet_family_ops);
- register_pernet_subsys(&packet_net_ops);
- register_netdevice_notifier(&packet_netdev_notifier);
+out_pernet:
+ unregister_pernet_subsys(&packet_net_ops);
+out_sock:
+ sock_unregister(PF_PACKET);
+out_proto:
+ proto_unregister(&packet_proto);
out:
return rc;
}
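packet_init() now checks every registration and unwinds the earlier ones in reverse order through a goto ladder, where previously the return values of sock_register() and friends were ignored. A minimal stand-alone sketch of the ladder; reg_a/reg_b/reg_c are illustrative stand-ins for the proto, socket-family, and pernet registrations:

    #include <stdio.h>

    static int reg_a(void) { return 0; }
    static int reg_b(void) { return 0; }
    static int reg_c(void) { return -1; }       /* pretend this one fails */
    static void unreg_a(void) { puts("unreg a"); }
    static void unreg_b(void) { puts("unreg b"); }

    int main(void)
    {
        int rc;

        rc = reg_a();
        if (rc)
            goto out;
        rc = reg_b();
        if (rc)
            goto out_a;
        rc = reg_c();
        if (rc)
            goto out_b;
        return 0;

    out_b:
        unreg_b();      /* undo in reverse order of setup */
    out_a:
        unreg_a();
    out:
        return rc;
    }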
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 5586609afa27..1628975ff70a 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -15,6 +15,7 @@
#include <linux/netlink.h>
#include <linux/qrtr.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
+#include <linux/numa.h>
#include <net/sock.h>
@@ -77,7 +78,7 @@ static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
return container_of(sk, struct qrtr_sock, sk);
}
-static unsigned int qrtr_local_nid = -1;
+static unsigned int qrtr_local_nid = NUMA_NO_NODE;
/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index 09ab97475fc9..f63742f3ab5f 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -61,6 +61,7 @@ static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
pool->fmr_attr.max_pages);
if (IS_ERR(frmr->mr)) {
pr_warn("RDS/IB: %s failed to allocate MR", __func__);
+ err = PTR_ERR(frmr->mr);
goto out_no_cigar;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 85ec5460e6b2..c96d129a0959 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -745,14 +745,6 @@ static struct proto unix_proto = {
.obj_size = sizeof(struct unix_sock),
};
-/*
- * AF_UNIX sockets do not interact with hardware, hence they
- * dont trigger interrupts - so it's safe for them to have
- * bh-unsafe locking for their sk_receive_queue.lock. Split off
- * this special lock-class by reinitializing the spinlock key:
- */
-static struct lock_class_key af_unix_sk_receive_queue_lock_key;
-
static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
{
struct sock *sk = NULL;
@@ -767,8 +759,6 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
goto out;
sock_init_data(sock, sk);
- lockdep_set_class(&sk->sk_receive_queue.lock,
- &af_unix_sk_receive_queue_lock_key);
sk->sk_allocation = GFP_KERNEL_ACCOUNT;
sk->sk_write_space = unix_write_space;
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 40a8731c663b..84d3c0aadd73 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -786,12 +786,19 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
void virtio_transport_release(struct vsock_sock *vsk)
{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ struct virtio_vsock_pkt *pkt, *tmp;
struct sock *sk = &vsk->sk;
bool remove_sock = true;
lock_sock(sk);
if (sk->sk_type == SOCK_STREAM)
remove_sock = virtio_transport_close(vsk);
+
+ list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
+ list_del(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
release_sock(sk);
if (remove_sock)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index c4b637e6d9ab..a66c7900c930 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1218,7 +1218,7 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
if (rate->he_dcm)
result /= 2;
- return result;
+ return result / 10000;
}
u32 cfg80211_calculate_bitrate(struct rate_info *rate)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9da08a62cb8a..80a50a829507 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3266,6 +3266,7 @@ static void alc256_init(struct hda_codec *codec)
alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* High power */
alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 1 << 15); /* Clear bit */
alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15);
+ alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path */
}
static void alc256_shutup(struct hda_codec *codec)
@@ -7833,7 +7834,6 @@ static int patch_alc269(struct hda_codec *codec)
spec->shutup = alc256_shutup;
spec->init_hook = alc256_init;
spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
- alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
break;
case 0x10ec0257:
spec->codec_variant = ALC269_TYPE_ALC257;
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 6e8eb1f5a041..bed64723e5d9 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -60,7 +60,7 @@ static const struct reg_default cs4265_reg_defaults[] = {
static bool cs4265_readable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
- case CS4265_CHIP_ID ... CS4265_SPDIF_CTL2:
+ case CS4265_CHIP_ID ... CS4265_MAX_REGISTER:
return true;
default:
return false;
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index ecab7d71ca74..9ac056ad9e36 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -1924,6 +1924,21 @@ static int max98090_configure_dmic(struct max98090_priv *max98090,
return 0;
}
+static int max98090_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct max98090_priv *max98090 = snd_soc_component_get_drvdata(component);
+ unsigned int fmt = max98090->dai_fmt;
+
+ /* Remove 24-bit format support if it is not in right justified mode. */
+ if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) != SND_SOC_DAIFMT_RIGHT_J) {
+ substream->runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(substream->runtime, 0, 16, 16);
+ }
+ return 0;
+}
+
static int max98090_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -2331,6 +2346,7 @@ EXPORT_SYMBOL_GPL(max98090_mic_detect);
#define MAX98090_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
static const struct snd_soc_dai_ops max98090_dai_ops = {
+ .startup = max98090_dai_startup,
.set_sysclk = max98090_dai_set_sysclk,
.set_fmt = max98090_dai_set_fmt,
.set_tdm_slot = max98090_set_tdm_slot,
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 3ff25babdc55..0814698f0238 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2233,7 +2233,8 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
continue;
dev_dbg(be->dev, "ASoC: prepare BE %s\n",
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 0a2a27f2854d..08e635b9a9d2 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -295,6 +295,33 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip,
return nr_rates;
}
+/* The Line6 Helix series doesn't support the UAC2_CS_RANGE USB function
+ * call. Return a static table of known clock rates.
+ */
+static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
+ struct audioformat *fp)
+{
+ switch (chip->usb_id) {
+ case USB_ID(0x0E41, 0x4241): /* Line6 Helix */
+ case USB_ID(0x0E41, 0x4242): /* Line6 Helix Rack */
+ case USB_ID(0x0E41, 0x4244): /* Line6 Helix LT */
+ case USB_ID(0x0E41, 0x4246): /* Line6 HX-Stomp */
+ /* supported rates: 48 kHz */
+ kfree(fp->rate_table);
+ fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL);
+ if (!fp->rate_table)
+ return -ENOMEM;
+ fp->nr_rates = 1;
+ fp->rate_min = 48000;
+ fp->rate_max = 48000;
+ fp->rates = SNDRV_PCM_RATE_48000;
+ fp->rate_table[0] = 48000;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
/*
* parse the format descriptor and stores the possible sample rates
* on the audioformat table (audio class v2 and v3).
@@ -304,7 +331,7 @@ static int parse_audio_format_rates_v2v3(struct snd_usb_audio *chip,
{
struct usb_device *dev = chip->dev;
unsigned char tmp[2], *data;
- int nr_triplets, data_size, ret = 0;
+ int nr_triplets, data_size, ret = 0, ret_l6;
int clock = snd_usb_clock_find_source(chip, fp->protocol,
fp->clock, false);
@@ -323,9 +350,22 @@ static int parse_audio_format_rates_v2v3(struct snd_usb_audio *chip,
tmp, sizeof(tmp));
if (ret < 0) {
- dev_err(&dev->dev,
- "%s(): unable to retrieve number of sample rates (clock %d)\n",
+ /* Line6 Helix devices don't support the UAC2_CS_CONTROL_SAM_FREQ call */
+ ret_l6 = line6_parse_audio_format_rates_quirk(chip, fp);
+ if (ret_l6 == -ENODEV) {
+ /* no Line6 device found; continue showing the error */
+ dev_err(&dev->dev,
+ "%s(): unable to retrieve number of sample rates (clock %d)\n",
+ __func__, clock);
+ goto err;
+ }
+ if (ret_l6 == 0) {
+ dev_info(&dev->dev,
+ "%s(): unable to retrieve number of sample rates: set it to a predefined value (clock %d).\n",
__func__, clock);
+ return 0;
+ }
+ ret = ret_l6;
goto err;
}
diff --git a/sound/usb/helper.c b/sound/usb/helper.c
index 7712e2b84183..b1cc9499c57e 100644
--- a/sound/usb/helper.c
+++ b/sound/usb/helper.c
@@ -76,6 +76,20 @@ void *snd_usb_find_csint_desc(void *buffer, int buflen, void *after, u8 dsubtype
return NULL;
}
+/* check the validity of pipe and EP types */
+int snd_usb_pipe_sanity_check(struct usb_device *dev, unsigned int pipe)
+{
+ static const int pipetypes[4] = {
+ PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
+ };
+ struct usb_host_endpoint *ep;
+
+ ep = usb_pipe_endpoint(dev, pipe);
+ if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
+ return -EINVAL;
+ return 0;
+}
+
/*
* Wrapper for usb_control_msg().
* Allocates a temp buffer to prevent dmaing from/to the stack.
@@ -88,6 +102,9 @@ int snd_usb_ctl_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
void *buf = NULL;
int timeout;
+ if (snd_usb_pipe_sanity_check(dev, pipe))
+ return -EINVAL;
+
if (size > 0) {
buf = kmemdup(data, size, GFP_KERNEL);
if (!buf)
diff --git a/sound/usb/helper.h b/sound/usb/helper.h
index 693f247de197..4777a44c823d 100644
--- a/sound/usb/helper.h
+++ b/sound/usb/helper.h
@@ -6,6 +6,7 @@ unsigned int snd_usb_combine_bytes(unsigned char *bytes, int size);
void *snd_usb_find_desc(void *descstart, int desclen, void *after, u8 dtype);
void *snd_usb_find_csint_desc(void *descstart, int desclen, void *after, u8 dsubtype);
+int snd_usb_pipe_sanity_check(struct usb_device *dev, unsigned int pipe);
int snd_usb_ctl_msg(struct usb_device *dev, unsigned int pipe,
__u8 request, __u8 requesttype, __u16 value, __u16 index,
void *data, __u16 size);
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 4e7313a727ef..bde101d95243 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -354,6 +354,10 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
ep = 0x81;
ifnum = 1;
goto add_sync_ep_from_ifnum;
+ case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook II */
+ ep = 0x84;
+ ifnum = 0;
+ goto add_sync_ep_from_ifnum;
}
if (attr == USB_ENDPOINT_SYNC_ASYNC &&
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index b345beb447bd..d764a109ded8 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2422,7 +2422,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
USB_DEVICE(0x086a, 0x0001),
.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
.vendor_name = "Emagic",
- /* .product_name = "Unitor8", */
+ .product_name = "Unitor8",
.ifnum = 2,
.type = QUIRK_MIDI_EMAGIC,
.data = & (const struct snd_usb_midi_endpoint_info) {
@@ -2770,6 +2770,90 @@ YAMAHA_DEVICE(0x7010, "UB99"),
.type = QUIRK_MIDI_NOVATION
}
},
+{
+ /*
+ * Focusrite Scarlett Solo 2nd generation:
+ * reports synchronous playback while still providing a feedback
+ * endpoint. Synchronous mode causes audible snapping at some
+ * sample rates, so force asynchronous mode.
+ */
+ USB_DEVICE(0x1235, 0x8205),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = & (const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels = 2,
+ .iface = 1,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = 0,
+ .endpoint = 0x01,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC |
+ USB_ENDPOINT_SYNC_ASYNC,
+ .protocol = UAC_VERSION_2,
+ .rates = SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_176400 |
+ SNDRV_PCM_RATE_192000,
+ .rate_min = 44100,
+ .rate_max = 192000,
+ .nr_rates = 6,
+ .rate_table = (unsigned int[]) {
+ 44100, 48000, 88200,
+ 96000, 176400, 192000
+ },
+ .clock = 41
+ }
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = & (const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels = 2,
+ .iface = 2,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = 0,
+ .endpoint = 0x82,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC |
+ USB_ENDPOINT_SYNC_ASYNC |
+ USB_ENDPOINT_USAGE_IMPLICIT_FB,
+ .protocol = UAC_VERSION_2,
+ .rates = SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_176400 |
+ SNDRV_PCM_RATE_192000,
+ .rate_min = 44100,
+ .rate_max = 192000,
+ .nr_rates = 6,
+ .rate_table = (unsigned int[]) {
+ 44100, 48000, 88200,
+ 96000, 176400, 192000
+ },
+ .clock = 41
+ }
+ },
+ {
+ .ifnum = 3,
+ .type = QUIRK_IGNORE_INTERFACE
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
/* Access Music devices */
{
@@ -3398,5 +3482,70 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.ifnum = QUIRK_NO_INTERFACE
}
},
+/* MOTU Microbook II */
+{
+ USB_DEVICE(0x07fd, 0x0004),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "MOTU",
+ .product_name = "MicroBookII",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3BE,
+ .channels = 6,
+ .iface = 0,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = 0,
+ .endpoint = 0x84,
+ .rates = SNDRV_PCM_RATE_96000,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC |
+ USB_ENDPOINT_SYNC_ASYNC,
+ .rate_min = 96000,
+ .rate_max = 96000,
+ .nr_rates = 1,
+ .maxpacksize = 0x00d8,
+ .rate_table = (unsigned int[]) {
+ 96000
+ }
+ }
+ },
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3BE,
+ .channels = 8,
+ .iface = 0,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = 0,
+ .endpoint = 0x03,
+ .rates = SNDRV_PCM_RATE_96000,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC |
+ USB_ENDPOINT_SYNC_ASYNC,
+ .rate_min = 96000,
+ .rate_max = 96000,
+ .nr_rates = 1,
+ .maxpacksize = 0x0120,
+ .rate_table = (unsigned int[]) {
+ 96000
+ }
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
#undef USB_DEVICE_VENDOR_SPEC
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 07bd80f38327..2a4cb5c122b4 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -841,11 +841,13 @@ static int snd_usb_novation_boot_quirk(struct usb_device *dev)
static int snd_usb_accessmusic_boot_quirk(struct usb_device *dev)
{
int err, actual_length;
-
/* "midi send" enable */
static const u8 seq[] = { 0x4e, 0x73, 0x52, 0x01 };
+ void *buf;
- void *buf = kmemdup(seq, ARRAY_SIZE(seq), GFP_KERNEL);
+ if (snd_usb_pipe_sanity_check(dev, usb_sndintpipe(dev, 0x05)))
+ return -EINVAL;
+ buf = kmemdup(seq, ARRAY_SIZE(seq), GFP_KERNEL);
if (!buf)
return -ENOMEM;
err = usb_interrupt_msg(dev, usb_sndintpipe(dev, 0x05), buf,
@@ -870,7 +872,11 @@ static int snd_usb_accessmusic_boot_quirk(struct usb_device *dev)
static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
{
- int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ int ret;
+
+ if (snd_usb_pipe_sanity_check(dev, usb_sndctrlpipe(dev, 0)))
+ return -EINVAL;
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
1, 0, NULL, 0, 1000);
@@ -977,6 +983,8 @@ static int snd_usb_axefx3_boot_quirk(struct usb_device *dev)
dev_dbg(&dev->dev, "Waiting for Axe-Fx III to boot up...\n");
+ if (snd_usb_pipe_sanity_check(dev, usb_sndctrlpipe(dev, 0)))
+ return -EINVAL;
/* If the Axe-Fx III has not fully booted, it will timeout when trying
* to enable the audio streaming interface. A more generous timeout is
* used here to detect when the Axe-Fx III has finished booting as the
@@ -1001,6 +1009,109 @@ static int snd_usb_axefx3_boot_quirk(struct usb_device *dev)
return 0;
}
+
+#define MICROBOOK_BUF_SIZE 128
+
+static int snd_usb_motu_microbookii_communicate(struct usb_device *dev, u8 *buf,
+ int buf_size, int *length)
+{
+ int err, actual_length;
+
+ if (snd_usb_pipe_sanity_check(dev, usb_sndintpipe(dev, 0x01)))
+ return -EINVAL;
+ err = usb_interrupt_msg(dev, usb_sndintpipe(dev, 0x01), buf, *length,
+ &actual_length, 1000);
+ if (err < 0)
+ return err;
+
+ print_hex_dump(KERN_DEBUG, "MicroBookII snd: ", DUMP_PREFIX_NONE, 16, 1,
+ buf, actual_length, false);
+
+ memset(buf, 0, buf_size);
+
+ if (snd_usb_pipe_sanity_check(dev, usb_rcvintpipe(dev, 0x82)))
+ return -EINVAL;
+ err = usb_interrupt_msg(dev, usb_rcvintpipe(dev, 0x82), buf, buf_size,
+ &actual_length, 1000);
+ if (err < 0)
+ return err;
+
+ print_hex_dump(KERN_DEBUG, "MicroBookII rcv: ", DUMP_PREFIX_NONE, 16, 1,
+ buf, actual_length, false);
+
+ *length = actual_length;
+ return 0;
+}
+
+static int snd_usb_motu_microbookii_boot_quirk(struct usb_device *dev)
+{
+ int err, actual_length, poll_attempts = 0;
+ static const u8 set_samplerate_seq[] = { 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x0b, 0x14,
+ 0x00, 0x00, 0x00, 0x01 };
+ static const u8 poll_ready_seq[] = { 0x00, 0x04, 0x00, 0x00,
+ 0x00, 0x00, 0x0b, 0x18 };
+ u8 *buf = kzalloc(MICROBOOK_BUF_SIZE, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ dev_info(&dev->dev, "Waiting for MOTU Microbook II to boot up...\n");
+
+ /* First we tell the device which sample rate to use. */
+ memcpy(buf, set_samplerate_seq, sizeof(set_samplerate_seq));
+ actual_length = sizeof(set_samplerate_seq);
+ err = snd_usb_motu_microbookii_communicate(dev, buf, MICROBOOK_BUF_SIZE,
+ &actual_length);
+
+ if (err < 0) {
+ dev_err(&dev->dev,
+ "failed setting the sample rate for Motu MicroBook II: %d\n",
+ err);
+ goto free_buf;
+ }
+
+ /* Then poll every 100 ms until the device signals that it is ready. */
+ while (true) {
+ if (++poll_attempts > 100) {
+ dev_err(&dev->dev,
+ "failed booting Motu MicroBook II: timeout\n");
+ err = -ENODEV;
+ goto free_buf;
+ }
+
+ memset(buf, 0, MICROBOOK_BUF_SIZE);
+ memcpy(buf, poll_ready_seq, sizeof(poll_ready_seq));
+
+ actual_length = sizeof(poll_ready_seq);
+ err = snd_usb_motu_microbookii_communicate(
+ dev, buf, MICROBOOK_BUF_SIZE, &actual_length);
+ if (err < 0) {
+ dev_err(&dev->dev,
+ "failed booting Motu MicroBook II: communication error %d\n",
+ err);
+ goto free_buf;
+ }
+
+ /* the device signals its readiness through a message of the
+ * form
+ * XX 06 00 00 00 00 0b 18 00 00 00 01
+ * If the device is not yet ready to accept audio data, the
+ * last byte of that sequence is 00.
+ */
+ if (actual_length == 12 && buf[actual_length - 1] == 1)
+ break;
+
+ msleep(100);
+ }
+
+ dev_info(&dev->dev, "MOTU MicroBook II ready\n");
+
+free_buf:
+ kfree(buf);
+ return err;
+}
+
/*
* Setup quirks
*/
@@ -1178,6 +1289,8 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
return snd_usb_gamecon780_boot_quirk(dev);
case USB_ID(0x2466, 0x8010): /* Fractal Audio Axe-Fx 3 */
return snd_usb_axefx3_boot_quirk(dev);
+ case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook II */
+ return snd_usb_motu_microbookii_boot_quirk(dev);
}
return 0;
@@ -1480,10 +1593,6 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
/* XMOS based USB DACs */
switch (chip->usb_id) {
case USB_ID(0x1511, 0x0037): /* AURALiC VEGA */
- case USB_ID(0x20b1, 0x0002): /* Wyred 4 Sound DAC-2 DSD */
- case USB_ID(0x20b1, 0x2004): /* Matrix Audio X-SPDIF 2 */
- case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
- case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
case USB_ID(0x22d9, 0x0436): /* OPPO Sonica */
case USB_ID(0x22d9, 0x0461): /* OPPO UDP-205 */
@@ -1493,23 +1602,13 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
- case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
- case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
- case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
+ case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */
+ case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */
case USB_ID(0x1db5, 0x0003): /* Bryston BDA3 */
- case USB_ID(0x20b1, 0x000a): /* Gustard DAC-X20U */
- case USB_ID(0x20b1, 0x2005): /* Denafrips Ares DAC */
- case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
- case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
- case USB_ID(0x20b1, 0x3021): /* Eastern El. MiniMax Tube DAC Supreme */
- case USB_ID(0x20b1, 0x3023): /* Aune X1S 32BIT/384 DSD DAC */
- case USB_ID(0x20b1, 0x302d): /* Unison Research Unico CD Due */
- case USB_ID(0x20b1, 0x307b): /* CH Precision C1 DAC */
- case USB_ID(0x20b1, 0x3086): /* Singxer F-1 converter board */
case USB_ID(0x22d9, 0x0426): /* OPPO HA-2 */
case USB_ID(0x22e1, 0xca01): /* HDTA Serenade DSD */
case USB_ID(0x249c, 0x9326): /* M2Tech Young MkIII */
diff --git a/tools/Makefile b/tools/Makefile
index f85bdf1daecb..6a43752f846c 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -16,6 +16,7 @@ help:
@echo ' gpio - GPIO tools'
@echo ' hv - tools used when in Hyper-V clients'
@echo ' iio - IIO tools'
+ @echo ' intel-speed-select - Intel Speed Select tool'
@echo ' kvm_stat - top-like utility for displaying kvm statistics'
@echo ' leds - LEDs tools'
@echo ' lguest - a minimal 32-bit x86 hypervisor'
@@ -78,7 +79,7 @@ perf: FORCE
selftests: FORCE
$(call descend,testing/$@)
-turbostat x86_energy_perf_policy: FORCE
+turbostat x86_energy_perf_policy intel-speed-select: FORCE
$(call descend,power/x86/$@)
tmon: FORCE
@@ -107,7 +108,7 @@ cgroup_install firewire_install gpio_install hv_install lguest_install perf_inst
selftests_install:
$(call descend,testing/$(@:_install=),install)
-turbostat_install x86_energy_perf_policy_install:
+turbostat_install x86_energy_perf_policy_install intel-speed-select_install:
$(call descend,power/x86/$(@:_install=),install)
tmon_install:
@@ -123,7 +124,8 @@ install: acpi_install cgroup_install cpupower_install gpio_install \
hv_install firewire_install lguest_install \
perf_install selftests_install turbostat_install usb_install \
virtio_install vm_install bpf_install x86_energy_perf_policy_install \
- tmon_install freefall_install objtool_install kvm_stat_install
+ tmon_install freefall_install objtool_install kvm_stat_install \
+ intel-speed-select_install
acpi_clean:
$(call descend,power/acpi,clean)
@@ -153,7 +155,7 @@ perf_clean:
selftests_clean:
$(call descend,testing/$(@:_clean=),clean)
-turbostat_clean x86_energy_perf_policy_clean:
+turbostat_clean x86_energy_perf_policy_clean intel-speed-select_clean:
$(call descend,power/x86/$(@:_clean=),clean)
tmon_clean:
@@ -169,6 +171,7 @@ clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean lguest_cle
perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \
vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
- gpio_clean objtool_clean leds_clean
+ gpio_clean objtool_clean leds_clean \
+ intel-speed-select_clean
.PHONY: FORCE
diff --git a/tools/power/x86/intel-speed-select/Build b/tools/power/x86/intel-speed-select/Build
new file mode 100644
index 000000000000..b61456d75190
--- /dev/null
+++ b/tools/power/x86/intel-speed-select/Build
@@ -0,0 +1 @@
+intel-speed-select-y += isst-config.o isst-core.o isst-display.o
diff --git a/tools/power/x86/intel-speed-select/Makefile b/tools/power/x86/intel-speed-select/Makefile
new file mode 100644
index 000000000000..12c6939dca2a
--- /dev/null
+++ b/tools/power/x86/intel-speed-select/Makefile
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0
+include ../../../scripts/Makefile.include
+
+bindir ?= /usr/bin
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+# Do not use make's built-in rules
+# (this improves performance and avoids hard-to-debug behaviour).
+MAKEFLAGS += -r
+
+override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
+
+ALL_TARGETS := intel-speed-select
+ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
+
+all: $(ALL_PROGRAMS)
+
+export srctree OUTPUT CC LD CFLAGS
+include $(srctree)/tools/build/Makefile.include
+
+#
+# We need the following to be outside of kernel tree
+#
+$(OUTPUT)include/linux/isst_if.h: ../../../../include/uapi/linux/isst_if.h
+ mkdir -p $(OUTPUT)include/linux 2>&1 || true
+ ln -sf $(CURDIR)/../../../../include/uapi/linux/isst_if.h $@
+
+prepare: $(OUTPUT)include/linux/isst_if.h
+
+ISST_IN := $(OUTPUT)intel-speed-select-in.o
+
+$(ISST_IN): prepare FORCE
+ $(Q)$(MAKE) $(build)=intel-speed-select
+$(OUTPUT)intel-speed-select: $(ISST_IN)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+
+clean:
+ rm -f $(ALL_PROGRAMS)
+ rm -rf $(OUTPUT)include/linux/isst_if.h
+ find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+
+install: $(ALL_PROGRAMS)
+ install -d -m 755 $(DESTDIR)$(bindir); \
+ for program in $(ALL_PROGRAMS); do \
+ install $$program $(DESTDIR)$(bindir); \
+ done
+
+FORCE:
+
+.PHONY: all install clean FORCE prepare
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
new file mode 100644
index 000000000000..91c5ad1685a1
--- /dev/null
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -0,0 +1,1607 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select -- Enumerate and control features
+ * Copyright (c) 2019 Intel Corporation.
+ */
+
+#include <linux/isst_if.h>
+
+#include "isst.h"
+
+struct process_cmd_struct {
+ char *feature;
+ char *command;
+ void (*process_fn)(void);
+};
+
+static const char *version_str = "v1.0";
+static const int supported_api_ver = 1;
+static struct isst_if_platform_info isst_platform_info;
+static char *progname;
+static int debug_flag;
+static FILE *outf;
+
+static int cpu_model;
+
+#define MAX_CPUS_IN_ONE_REQ 64
+static short max_target_cpus;
+static unsigned short target_cpus[MAX_CPUS_IN_ONE_REQ];
+
+static int topo_max_cpus;
+static size_t present_cpumask_size;
+static cpu_set_t *present_cpumask;
+static size_t target_cpumask_size;
+static cpu_set_t *target_cpumask;
+static int tdp_level = 0xFF;
+static int fact_bucket = 0xFF;
+static int fact_avx = 0xFF;
+static unsigned long long fact_trl;
+static int out_format_json;
+static int cmd_help;
+
+/* clos related */
+static int current_clos = -1;
+static int clos_epp = -1;
+static int clos_prop_prio = -1;
+static int clos_min = -1;
+static int clos_max = -1;
+static int clos_desired = -1;
+static int clos_priority_type;
+
+struct _cpu_map {
+ unsigned short core_id;
+ unsigned short pkg_id;
+ unsigned short die_id;
+ unsigned short punit_cpu;
+ unsigned short punit_cpu_core;
+};
+struct _cpu_map *cpu_map;
+
+void debug_printf(const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+
+ if (debug_flag)
+ vprintf(format, args);
+
+ va_end(args);
+}
+
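+/*
+ * CPUID leaf 1 returns the family in EAX bits 11:8 and the model in
+ * bits 7:4; for family 6 and 0xf parts the extended model (bits 19:16)
+ * is folded in as the high nibble, e.g. EAX 0x000506e3 decodes to
+ * family 6, model 0x5e.
+ */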
+static void update_cpu_model(void)
+{
+ unsigned int ebx, ecx, edx;
+ unsigned int fms, family;
+
+ __cpuid(1, fms, ebx, ecx, edx);
+ family = (fms >> 8) & 0xf;
+ cpu_model = (fms >> 4) & 0xf;
+ if (family == 6 || family == 0xf)
+ cpu_model += ((fms >> 16) & 0xf) << 4;
+}
+
+/* Open a file, and exit on failure */
+static FILE *fopen_or_exit(const char *path, const char *mode)
+{
+ FILE *filep = fopen(path, mode);
+
+ if (!filep)
+ err(1, "%s: open failed", path);
+
+ return filep;
+}
+
+/* Parse a file containing a single int */
+static int parse_int_file(int fatal, const char *fmt, ...)
+{
+ va_list args;
+ char path[PATH_MAX];
+ FILE *filep;
+ int value;
+
+ va_start(args, fmt);
+ vsnprintf(path, sizeof(path), fmt, args);
+ va_end(args);
+ if (fatal) {
+ filep = fopen_or_exit(path, "r");
+ } else {
+ filep = fopen(path, "r");
+ if (!filep)
+ return -1;
+ }
+ if (fscanf(filep, "%d", &value) != 1)
+ err(1, "%s: failed to parse number from file", path);
+ fclose(filep);
+
+ return value;
+}
+
+int cpufreq_sysfs_present(void)
+{
+ DIR *dir;
+
+ dir = opendir("/sys/devices/system/cpu/cpu0/cpufreq");
+ if (dir) {
+ closedir(dir);
+ return 1;
+ }
+
+ return 0;
+}
+
+int out_format_is_json(void)
+{
+ return out_format_json;
+}
+
+int get_physical_package_id(int cpu)
+{
+ return parse_int_file(
+ 1, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id",
+ cpu);
+}
+
+int get_physical_core_id(int cpu)
+{
+ return parse_int_file(
+ 1, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
+}
+
+int get_physical_die_id(int cpu)
+{
+ int ret;
+
+ ret = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/topology/die_id",
+ cpu);
+ if (ret < 0)
+ ret = 0;
+
+ return ret;
+}
+
+int get_topo_max_cpus(void)
+{
+ return topo_max_cpus;
+}
+
+#define MAX_PACKAGE_COUNT 8
+#define MAX_DIE_PER_PACKAGE 2
+static void for_each_online_package_in_set(void (*callback)(int, void *, void *,
+ void *, void *),
+ void *arg1, void *arg2, void *arg3,
+ void *arg4)
+{
+ int max_packages[MAX_PACKAGE_COUNT * MAX_PACKAGE_COUNT];
+ int pkg_index = 0, i;
+
+ memset(max_packages, 0xff, sizeof(max_packages));
+ for (i = 0; i < topo_max_cpus; ++i) {
+ int j, online, pkg_id, die_id = 0, skip = 0;
+
+ if (!CPU_ISSET_S(i, present_cpumask_size, present_cpumask))
+ continue;
+ if (i)
+ online = parse_int_file(
+ 1, "/sys/devices/system/cpu/cpu%d/online", i);
+ else
+ online =
+ 1; /* CPU 0 has no "online" sysfs entry; treat it as online */
+
+ die_id = get_physical_die_id(i);
+ if (die_id < 0)
+ die_id = 0;
+ pkg_id = get_physical_package_id(i);
+ /* Create a unique id for the package/die combination to store */
+ pkg_id = (MAX_PACKAGE_COUNT * pkg_id + die_id);
+
+ for (j = 0; j < pkg_index; ++j) {
+ if (max_packages[j] == pkg_id) {
+ skip = 1;
+ break;
+ }
+ }
+
+ if (!skip && online && callback) {
+ callback(i, arg1, arg2, arg3, arg4);
+ max_packages[pkg_index++] = pkg_id;
+ }
+ }
+}
+
+static void for_each_online_target_cpu_in_set(
+ void (*callback)(int, void *, void *, void *, void *), void *arg1,
+ void *arg2, void *arg3, void *arg4)
+{
+ int i;
+
+ for (i = 0; i < topo_max_cpus; ++i) {
+ int online;
+
+ if (!CPU_ISSET_S(i, target_cpumask_size, target_cpumask))
+ continue;
+ if (i)
+ online = parse_int_file(
+ 1, "/sys/devices/system/cpu/cpu%d/online", i);
+ else
+ online =
+ 1; /* CPU 0 has no "online" sysfs entry; treat it as online */
+
+ if (online && callback)
+ callback(i, arg1, arg2, arg3, arg4);
+ }
+}
+
+#define BITMASK_SIZE 32
+static void set_max_cpu_num(void)
+{
+ FILE *filep;
+ unsigned long dummy;
+
+ topo_max_cpus = 0;
+ filep = fopen_or_exit(
+ "/sys/devices/system/cpu/cpu0/topology/thread_siblings", "r");
+ while (fscanf(filep, "%lx,", &dummy) == 1)
+ topo_max_cpus += BITMASK_SIZE;
+ fclose(filep);
+ topo_max_cpus--; /* 0 based */
+
+ debug_printf("max cpus %d\n", topo_max_cpus);
+}
+
+size_t alloc_cpu_set(cpu_set_t **cpu_set)
+{
+ cpu_set_t *_cpu_set;
+ size_t size;
+
+ _cpu_set = CPU_ALLOC((topo_max_cpus + 1));
+ if (_cpu_set == NULL)
+ err(3, "CPU_ALLOC");
+ size = CPU_ALLOC_SIZE((topo_max_cpus + 1));
+ CPU_ZERO_S(size, _cpu_set);
+
+ *cpu_set = _cpu_set;
+ return size;
+}
+
+void free_cpu_set(cpu_set_t *cpu_set)
+{
+ CPU_FREE(cpu_set);
+}
+
+static int cpu_cnt[MAX_PACKAGE_COUNT][MAX_DIE_PER_PACKAGE];
+static void set_cpu_present_cpu_mask(void)
+{
+ size_t size;
+ DIR *dir;
+ int i;
+
+ size = alloc_cpu_set(&present_cpumask);
+ present_cpumask_size = size;
+ for (i = 0; i < topo_max_cpus; ++i) {
+ char buffer[256];
+
+ snprintf(buffer, sizeof(buffer),
+ "/sys/devices/system/cpu/cpu%d", i);
+ dir = opendir(buffer);
+ if (dir) {
+ int pkg_id, die_id;
+
+ CPU_SET_S(i, size, present_cpumask);
+ die_id = get_physical_die_id(i);
+ if (die_id < 0)
+ die_id = 0;
+
+ pkg_id = get_physical_package_id(i);
+ if (pkg_id < MAX_PACKAGE_COUNT &&
+ die_id < MAX_DIE_PER_PACKAGE)
+ cpu_cnt[pkg_id][die_id]++;
+ }
+ closedir(dir);
+ }
+}
+
+int get_cpu_count(int pkg_id, int die_id)
+{
+ if (pkg_id < MAX_PACKAGE_COUNT && die_id < MAX_DIE_PER_PACKAGE)
+ return cpu_cnt[pkg_id][die_id] + 1;
+
+ return 0;
+}
+
+static void set_cpu_target_cpu_mask(void)
+{
+ size_t size;
+ int i;
+
+ size = alloc_cpu_set(&target_cpumask);
+ target_cpumask_size = size;
+ for (i = 0; i < max_target_cpus; ++i) {
+ if (!CPU_ISSET_S(target_cpus[i], present_cpumask_size,
+ present_cpumask))
+ continue;
+
+ CPU_SET_S(target_cpus[i], size, target_cpumask);
+ }
+}
+
+static void create_cpu_map(void)
+{
+ const char *pathname = "/dev/isst_interface";
+ int i, fd = 0;
+ struct isst_if_cpu_maps map;
+
+ cpu_map = malloc(sizeof(*cpu_map) * topo_max_cpus);
+ if (!cpu_map)
+ err(3, "cpumap");
+
+ fd = open(pathname, O_RDWR);
+ if (fd < 0)
+ err(-1, "%s open failed", pathname);
+
+ for (i = 0; i < topo_max_cpus; ++i) {
+ if (!CPU_ISSET_S(i, present_cpumask_size, present_cpumask))
+ continue;
+
+ map.cmd_count = 1;
+ map.cpu_map[0].logical_cpu = i;
+
+ debug_printf(" map logical_cpu:%d\n",
+ map.cpu_map[0].logical_cpu);
+ if (ioctl(fd, ISST_IF_GET_PHY_ID, &map) == -1) {
+ perror("ISST_IF_GET_PHY_ID");
+ fprintf(outf, "Error: map logical_cpu:%d\n",
+ map.cpu_map[0].logical_cpu);
+ continue;
+ }
+ cpu_map[i].core_id = get_physical_core_id(i);
+ cpu_map[i].pkg_id = get_physical_package_id(i);
+ cpu_map[i].die_id = get_physical_die_id(i);
+ cpu_map[i].punit_cpu = map.cpu_map[0].physical_cpu;
+ cpu_map[i].punit_cpu_core = (map.cpu_map[0].physical_cpu >>
+ 1); /* shift to get core id */
+
+ debug_printf(
+ "map logical_cpu:%d core: %d die:%d pkg:%d punit_cpu:%d punit_core:%d\n",
+ i, cpu_map[i].core_id, cpu_map[i].die_id,
+ cpu_map[i].pkg_id, cpu_map[i].punit_cpu,
+ cpu_map[i].punit_cpu_core);
+ }
+
+ if (fd)
+ close(fd);
+}
+
+int find_logical_cpu(int pkg_id, int die_id, int punit_core_id)
+{
+ int i;
+
+ for (i = 0; i < topo_max_cpus; ++i) {
+ if (cpu_map[i].pkg_id == pkg_id &&
+ cpu_map[i].die_id == die_id &&
+ cpu_map[i].punit_cpu_core == punit_core_id)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+void set_cpu_mask_from_punit_coremask(int cpu, unsigned long long core_mask,
+ size_t core_cpumask_size,
+ cpu_set_t *core_cpumask, int *cpu_cnt)
+{
+ int i, cnt = 0;
+ int die_id, pkg_id;
+
+ *cpu_cnt = 0;
+ die_id = get_physical_die_id(cpu);
+ pkg_id = get_physical_package_id(cpu);
+
+ for (i = 0; i < 64; ++i) {
+ if (core_mask & BIT(i)) {
+ int j;
+
+ for (j = 0; j < topo_max_cpus; ++j) {
+ if (cpu_map[j].pkg_id == pkg_id &&
+ cpu_map[j].die_id == die_id &&
+ cpu_map[j].punit_cpu_core == i) {
+ CPU_SET_S(j, core_cpumask_size,
+ core_cpumask);
+ ++cnt;
+ }
+ }
+ }
+ }
+
+ *cpu_cnt = cnt;
+}
+
+int find_phy_core_num(int logical_cpu)
+{
+ if (logical_cpu < topo_max_cpus)
+ return cpu_map[logical_cpu].punit_cpu_core;
+
+ return -EINVAL;
+}
+
+static int isst_send_mmio_command(unsigned int cpu, unsigned int reg, int write,
+ unsigned int *value)
+{
+ struct isst_if_io_regs io_regs;
+ const char *pathname = "/dev/isst_interface";
+ int cmd;
+ int fd;
+
+ debug_printf("mmio_cmd cpu:%d reg:%d write:%d\n", cpu, reg, write);
+
+ fd = open(pathname, O_RDWR);
+ if (fd < 0)
+ err(-1, "%s open failed", pathname);
+
+ io_regs.req_count = 1;
+ io_regs.io_reg[0].logical_cpu = cpu;
+ io_regs.io_reg[0].reg = reg;
+ cmd = ISST_IF_IO_CMD;
+ if (write) {
+ io_regs.io_reg[0].read_write = 1;
+ io_regs.io_reg[0].value = *value;
+ } else {
+ io_regs.io_reg[0].read_write = 0;
+ }
+
+ if (ioctl(fd, cmd, &io_regs) == -1) {
+ perror("ISST_IF_IO_CMD");
+ fprintf(outf, "Error: mmio_cmd cpu:%d reg:%x read_write:%x\n",
+ cpu, reg, write);
+ } else {
+ if (!write)
+ *value = io_regs.io_reg[0].value;
+
+ debug_printf(
+ "mmio_cmd response: cpu:%d reg:%x rd_write:%x resp:%x\n",
+ cpu, reg, write, *value);
+ }
+
+ close(fd);
+
+ return 0;
+}
+
+int isst_send_mbox_command(unsigned int cpu, unsigned char command,
+ unsigned char sub_command, unsigned int parameter,
+ unsigned int req_data, unsigned int *resp)
+{
+ const char *pathname = "/dev/isst_interface";
+ int fd;
+ struct isst_if_mbox_cmds mbox_cmds = { 0 };
+
+ debug_printf(
+ "mbox_send: cpu:%d command:%x sub_command:%x parameter:%x req_data:%x\n",
+ cpu, command, sub_command, parameter, req_data);
+
+ if (isst_platform_info.mmio_supported && command == CONFIG_CLOS) {
+ unsigned int value;
+ int write = 0;
+ int clos_id, core_id, ret = 0;
+
+ debug_printf("CLOS %d\n", cpu);
+
+ if (parameter & BIT(MBOX_CMD_WRITE_BIT)) {
+ value = req_data;
+ write = 1;
+ }
+
+ switch (sub_command) {
+ case CLOS_PQR_ASSOC:
+ core_id = parameter & 0xff;
+ ret = isst_send_mmio_command(
+ cpu, PQR_ASSOC_OFFSET + core_id * 4, write,
+ &value);
+ if (!ret && !write)
+ *resp = value;
+ break;
+ case CLOS_PM_CLOS:
+ clos_id = parameter & 0x03;
+ ret = isst_send_mmio_command(
+ cpu, PM_CLOS_OFFSET + clos_id * 4, write,
+ &value);
+ if (!ret && !write)
+ *resp = value;
+ break;
+ case CLOS_PM_QOS_CONFIG:
+ ret = isst_send_mmio_command(cpu, PM_QOS_CONFIG_OFFSET,
+ write, &value);
+ if (!ret && !write)
+ *resp = value;
+ break;
+ case CLOS_STATUS:
+ break;
+ default:
+ break;
+ }
+ return ret;
+ }
+
+ mbox_cmds.cmd_count = 1;
+ mbox_cmds.mbox_cmd[0].logical_cpu = cpu;
+ mbox_cmds.mbox_cmd[0].command = command;
+ mbox_cmds.mbox_cmd[0].sub_command = sub_command;
+ mbox_cmds.mbox_cmd[0].parameter = parameter;
+ mbox_cmds.mbox_cmd[0].req_data = req_data;
+
+ fd = open(pathname, O_RDWR);
+ if (fd < 0)
+ err(-1, "%s open failed", pathname);
+
+ if (ioctl(fd, ISST_IF_MBOX_COMMAND, &mbox_cmds) == -1) {
+ perror("ISST_IF_MBOX_COMMAND");
+ fprintf(outf,
+ "Error: mbox_cmd cpu:%d command:%x sub_command:%x parameter:%x req_data:%x\n",
+ cpu, command, sub_command, parameter, req_data);
+ } else {
+ *resp = mbox_cmds.mbox_cmd[0].resp_data;
+ debug_printf(
+ "mbox_cmd response: cpu:%d command:%x sub_command:%x parameter:%x req_data:%x resp:%x\n",
+ cpu, command, sub_command, parameter, req_data, *resp);
+ }
+
+ close(fd);
+
+ return 0;
+}
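+
+/*
+ * Illustrative example of the CLOS-over-MMIO redirection above: on a
+ * platform with mmio_supported set, a CONFIG_CLOS/CLOS_PM_CLOS request
+ * for clos id 2 is turned into an MMIO access at
+ * PM_CLOS_OFFSET + 2 * 4 instead of a mailbox command.
+ */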
+
+int isst_send_msr_command(unsigned int cpu, unsigned int msr, int write,
+ unsigned long long *req_resp)
+{
+ struct isst_if_msr_cmds msr_cmds;
+ const char *pathname = "/dev/isst_interface";
+ int fd;
+
+ fd = open(pathname, O_RDWR);
+ if (fd < 0)
+ err(-1, "%s open failed", pathname);
+
+ msr_cmds.cmd_count = 1;
+ msr_cmds.msr_cmd[0].logical_cpu = cpu;
+ msr_cmds.msr_cmd[0].msr = msr;
+ msr_cmds.msr_cmd[0].read_write = write;
+ if (write)
+ msr_cmds.msr_cmd[0].data = *req_resp;
+
+ if (ioctl(fd, ISST_IF_MSR_COMMAND, &msr_cmds) == -1) {
+ perror("ISST_IF_MSR_COMMAD");
+ fprintf(outf, "Error: msr_cmd cpu:%d msr:%x read_write:%d\n",
+ cpu, msr, write);
+ } else {
+ if (!write)
+ *req_resp = msr_cmds.msr_cmd[0].data;
+
+ debug_printf(
+ "msr_cmd response: cpu:%d msr:%x rd_write:%x resp:%llx %llx\n",
+ cpu, msr, write, *req_resp, msr_cmds.msr_cmd[0].data);
+ }
+
+ close(fd);
+
+ return 0;
+}
+
+static int isst_fill_platform_info(void)
+{
+ const char *pathname = "/dev/isst_interface";
+ int fd;
+
+ fd = open(pathname, O_RDWR);
+ if (fd < 0)
+ err(-1, "%s open failed", pathname);
+
+ if (ioctl(fd, ISST_IF_GET_PLATFORM_INFO, &isst_platform_info) == -1) {
+ perror("ISST_IF_GET_PLATFORM_INFO");
+ close(fd);
+ return -1;
+ }
+
+ close(fd);
+
+ return 0;
+}
+
+static void isst_print_platform_information(void)
+{
+ struct isst_if_platform_info platform_info;
+ const char *pathname = "/dev/isst_interface";
+ int fd;
+
+ fd = open(pathname, O_RDWR);
+ if (fd < 0)
+ err(-1, "%s open failed", pathname);
+
+ if (ioctl(fd, ISST_IF_GET_PLATFORM_INFO, &platform_info) == -1) {
+ perror("ISST_IF_GET_PLATFORM_INFO");
+ } else {
+ fprintf(outf, "Platform: API version : %d\n",
+ platform_info.api_version);
+ fprintf(outf, "Platform: Driver version : %d\n",
+ platform_info.driver_version);
+ fprintf(outf, "Platform: mbox supported : %d\n",
+ platform_info.mbox_supported);
+ fprintf(outf, "Platform: mmio supported : %d\n",
+ platform_info.mmio_supported);
+ }
+
+ close(fd);
+
+ exit(0);
+}
+
+static void exec_on_get_ctdp_cpu(int cpu, void *arg1, void *arg2, void *arg3,
+ void *arg4)
+{
+ int (*fn_ptr)(int cpu, void *arg);
+ int ret;
+
+ fn_ptr = arg1;
+ ret = fn_ptr(cpu, arg2);
+ if (ret)
+ perror("get_tdp_*");
+ else
+ isst_display_result(cpu, outf, "perf-profile", (char *)arg3,
+ *(unsigned int *)arg4);
+}
+
+#define _get_tdp_level(desc, suffix, object, help) \
+ static void get_tdp_##object(void) \
+ { \
+ struct isst_pkg_ctdp ctdp; \
+\
+ if (cmd_help) { \
+ fprintf(stderr, \
+ "Print %s [No command arguments are required]\n", \
+ help); \
+ exit(0); \
+ } \
+ isst_ctdp_display_information_start(outf); \
+ if (max_target_cpus) \
+ for_each_online_target_cpu_in_set( \
+ exec_on_get_ctdp_cpu, isst_get_ctdp_##suffix, \
+ &ctdp, desc, &ctdp.object); \
+ else \
+ for_each_online_package_in_set(exec_on_get_ctdp_cpu, \
+ isst_get_ctdp_##suffix, \
+ &ctdp, desc, \
+ &ctdp.object); \
+ isst_ctdp_display_information_end(outf); \
+ }
+
+_get_tdp_level("get-config-levels", levels, levels, "TDP levels");
+_get_tdp_level("get-config-version", levels, version, "TDP version");
+_get_tdp_level("get-config-enabled", levels, enabled, "TDP enable status");
+_get_tdp_level("get-config-current_level", levels, current_level,
+ "Current TDP Level");
+_get_tdp_level("get-lock-status", levels, locked, "TDP lock status");
+
+static void dump_isst_config_for_cpu(int cpu, void *arg1, void *arg2,
+ void *arg3, void *arg4)
+{
+ struct isst_pkg_ctdp pkg_dev;
+ int ret;
+
+ memset(&pkg_dev, 0, sizeof(pkg_dev));
+ ret = isst_get_process_ctdp(cpu, tdp_level, &pkg_dev);
+ if (ret) {
+ perror("isst_get_process_ctdp");
+ } else {
+ isst_ctdp_display_information(cpu, outf, tdp_level, &pkg_dev);
+ isst_get_process_ctdp_complete(cpu, &pkg_dev);
+ }
+}
+
+static void dump_isst_config(void)
+{
+ if (cmd_help) {
+ fprintf(stderr,
+ "Print Intel(R) Speed Select Technology Performance profile configuration\n");
+ fprintf(stderr,
+ "including base frequency and turbo frequency configurations\n");
+ fprintf(stderr, "Optional: -l|--level : Specify tdp level\n");
+ fprintf(stderr,
+ "\tIf no arguments, dump information for all TDP levels\n");
+ exit(0);
+ }
+
+ isst_ctdp_display_information_start(outf);
+
+ if (max_target_cpus)
+ for_each_online_target_cpu_in_set(dump_isst_config_for_cpu,
+ NULL, NULL, NULL, NULL);
+ else
+ for_each_online_package_in_set(dump_isst_config_for_cpu, NULL,
+ NULL, NULL, NULL);
+
+ isst_ctdp_display_information_end(outf);
+}
+
+static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
+ void *arg4)
+{
+ int ret;
+
+ ret = isst_set_tdp_level(cpu, tdp_level);
+ if (ret)
+ perror("set_tdp_level_for_cpu");
+ else
+ isst_display_result(cpu, outf, "perf-profile", "set_tdp_level",
+ ret);
+}
+
+static void set_tdp_level(void)
+{
+ if (cmd_help) {
+ fprintf(stderr, "Set Config TDP level\n");
+ fprintf(stderr,
+ "\t Arguments: -l|--level : Specify tdp level\n");
+ exit(0);
+ }
+
+ if (tdp_level == 0xff) {
+ fprintf(outf, "Invalid command: specify tdp_level\n");
+ exit(1);
+ }
+ isst_ctdp_display_information_start(outf);
+ if (max_target_cpus)
+ for_each_online_target_cpu_in_set(set_tdp_level_for_cpu, NULL,
+ NULL, NULL, NULL);
+ else
+ for_each_online_package_in_set(set_tdp_level_for_cpu, NULL,
+ NULL, NULL, NULL);
+ isst_ctdp_display_information_end(outf);
+}
+
+static void dump_pbf_config_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
+ void *arg4)
+{
+ struct isst_pbf_info pbf_info;
+ int ret;
+
+ ret = isst_get_pbf_info(cpu, tdp_level, &pbf_info);
+ if (ret) {
+ perror("isst_get_pbf_info");
+ } else {
+ isst_pbf_display_information(cpu, outf, tdp_level, &pbf_info);
+ isst_get_pbf_info_complete(&pbf_info);
+ }
+}
+
+static void dump_pbf_config(void)
+{
+ if (cmd_help) {
+ fprintf(stderr,
+ "Print Intel(R) Speed Select Technology base frequency configuration for a TDP level\n");
+ fprintf(stderr,
+ "\tArguments: -l|--level : Specify tdp level\n");
+ exit(0);
+ }
+
+ if (tdp_level == 0xff) {
+ fprintf(outf, "Invalid command: specify tdp_level\n");
+ exit(1);
+ }
+
+ isst_ctdp_display_information_start(outf);
+ if (max_target_cpus)
+ for_each_online_target_cpu_in_set(dump_pbf_config_for_cpu, NULL,
+ NULL, NULL, NULL);
+ else
+ for_each_online_package_in_set(dump_pbf_config_for_cpu, NULL,
+ NULL, NULL, NULL);
+ isst_ctdp_display_information_end(outf);
+}
+
+static void set_pbf_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
+ void *arg4)
+{
+ int ret;
+ int status = *(int *)arg4;
+
+ ret = isst_set_pbf_fact_status(cpu, 1, status);
+ if (ret) {
+ perror("isst_set_pbf");
+ } else {
+ if (status)
+ isst_display_result(cpu, outf, "base-freq", "enable",
+ ret);
+ else
+ isst_display_result(cpu, outf, "base-freq", "disable",
+ ret);
+ }
+}
+
+static void set_pbf_enable(void)
+{
+ int status = 1;
+
+ if (cmd_help) {
+ fprintf(stderr,
+ "Enable Intel Speed Select Technology base frequency feature [No command arguments are required]\n");
+ exit(0);
+ }
+
+ isst_ctdp_display_information_start(outf);
+ if (max_target_cpus)
+ for_each_online_target_cpu_in_set(set_pbf_for_cpu, NULL, NULL,
+ NULL, &status);
+ else
+ for_each_online_package_in_set(set_pbf_for_cpu, NULL, NULL,
+ NULL, &status);
+ isst_ctdp_display_information_end(outf);
+}
+
+static void set_pbf_disable(void)
+{
+ int status = 0;
+
+ if (cmd_help) {
+ fprintf(stderr,
+ "Disable Intel Speed Select Technology base frequency feature [No command arguments are required]\n");
+ exit(0);
+ }
+
+ isst_ctdp_display_information_start(outf);
+ if (max_target_cpus)
+ for_each_online_target_cpu_in_set(set_pbf_for_cpu, NULL, NULL,
+ NULL, &status);
+ else
+ for_each_online_package_in_set(set_pbf_for_cpu, NULL, NULL,
+ NULL, &status);
+ isst_ctdp_display_information_end(outf);
+}
+
+static void dump_fact_config_for_cpu(int cpu, void *arg1, void *arg2,
+ void *arg3, void *arg4)
+{
+ struct isst_fact_info fact_info;
+ int ret;
+
+ ret = isst_get_fact_info(cpu, tdp_level, &fact_info);
+ if (ret)
+ perror("isst_get_fact_bucket_info");
+ else
+ isst_fact_display_information(cpu, outf, tdp_level, fact_bucket,
+ fact_avx, &fact_info);
+}
+
+static void dump_fact_config(void)
+{
+ if (cmd_help) {
+ fprintf(stderr,
+ "Print complete Intel Speed Select Technology turbo frequency configuration for a TDP level. Other arguments are optional.\n");
+ fprintf(stderr,
+ "\tArguments: -l|--level : Specify tdp level\n");
+ fprintf(stderr,
+ "\tArguments: -b|--bucket : Bucket index to dump\n");
+ fprintf(stderr,
+ "\tArguments: -r|--trl-type : Specify trl type: sse|avx2|avx512\n");
+ exit(0);
+ }
+
+ if (tdp_level == 0xff) {
+ fprintf(outf, "Invalid command: specify tdp_level\n");
+ exit(1);
+ }
+
+ isst_ctdp_display_information_start(outf);
+ if (max_target_cpus)
+ for_each_online_target_cpu_in_set(dump_fact_config_for_cpu,
+ NULL, NULL, NULL, NULL);
+ else
+ for_each_online_package_in_set(dump_fact_config_for_cpu, NULL,
+ NULL, NULL, NULL);
+ isst_ctdp_display_information_end(outf);
+}
+
+static void set_fact_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
+ void *arg4)
+{
+ int ret;
+ int status = *(int *)arg4;
+
+ ret = isst_set_pbf_fact_status(cpu, 0, status);
+ if (ret)
+ perror("isst_set_fact");
+ else {
+ if (status) {
+ struct isst_pkg_ctdp pkg_dev;
+
+ ret = isst_get_ctdp_levels(cpu, &pkg_dev);
+ if (ret) {
+ isst_display_result(cpu, outf, "turbo-freq",
+ "enable", ret);
+ return;
+ }
+ ret = isst_set_trl(cpu, fact_trl);
+ isst_display_result(cpu, outf, "turbo-freq", "enable",
+ ret);
+ } else {
+ /* Since we modified TRL during Fact enable, restore it */
+ isst_set_trl_from_current_tdp(cpu, fact_trl);
+ isst_display_result(cpu, outf, "turbo-freq", "disable",
+ ret);
+ }
+ }
+}
+
+static void set_fact_enable(void)
+{
+ int status = 1;
+
+ if (cmd_help) {
+ fprintf(stderr,
+ "Enable Intel Speed Select Technology Turbo frequency feature\n");
+ fprintf(stderr,
+ "Optional: -t|--trl : Specify turbo ratio limit\n");
+ exit(0);
+ }
+
+ isst_ctdp_display_information_start(outf);
+ if (max_target_cpus)
+ for_each_online_target_cpu_in_set(set_fact_for_cpu, NULL, NULL,
+ NULL, &status);
+ else
+ for_each_online_package_in_set(set_fact_for_cpu, NULL, NULL,
+ NULL, &status);
+ isst_ctdp_display_information_end(outf);
+}
+
+static void set_fact_disable(void)
+{
+ int status = 0;
+
+ if (cmd_help) {
+ fprintf(stderr,
+ "Disable Intel Speed Select Technology turbo frequency feature\n");
+ fprintf(stderr,
+ "Optional: -t|--trl : Specify turbo ratio limit\n");
+ exit(0);
+ }
+
+ isst_ctdp_display_information_start(outf);
+ if (max_target_cpus)
+ for_each_online_target_cpu_in_set(set_fact_for_cpu, NULL, NULL,
+ NULL, &status);
+ else
+ for_each_online_package_in_set(set_fact_for_cpu, NULL, NULL,
+ NULL, &status);
+ isst_ctdp_display_information_end(outf);
+}
+
+static void enable_clos_qos_config(int cpu, void *arg1, void *arg2, void *arg3,
+ void *arg4)
+{
+ int ret;
+ int status = *(int *)arg4;
+
+ ret = isst_pm_qos_config(cpu, status, clos_priority_type);
+ if (ret) {
+ perror("isst_pm_qos_config");
+ } else {
+ if (status)
+ isst_display_result(cpu, outf, "core-power", "enable",
+ ret);
+ else
+ isst_display_result(cpu, outf, "core-power", "disable",
+ ret);
+ }
+}
+
+static void set_clos_enable(void)
+{
+ int status = 1;
+
+ if (cmd_help) {
+ fprintf(stderr, "Enable core-power for a package/die\n");
+ fprintf(stderr,
+ "\tClos Enable: Specify priority type with [--priority|-p]\n");
+ fprintf(stderr, "\t\t 0: Proportional, 1: Ordered\n");
+ exit(0);
+ }
+
+ if (cpufreq_sysfs_present()) {
+ fprintf(stderr,
+ "cpufreq subsystem and core-power enable will interfere with each other!\n");
+ }
+
+ isst_ctdp_display_information_start(outf);
+ if (max_target_cpus)
+ for_each_online_target_cpu_in_set(enable_clos_qos_config, NULL,
+ NULL, NULL, &status);
+ else
+ for_each_online_package_in_set(enable_clos_qos_config, NULL,
+ NULL, NULL, &status);
+ isst_ctdp_display_information_end(outf);
+}
+
+static void set_clos_disable(void)
+{
+ int status = 0;
+
+ if (cmd_help) {
+ fprintf(stderr,
+ "Disable core-power: [No command arguments are required]\n");
+ exit(0);
+ }
+
+ isst_ctdp_display_information_start(outf);
+ if (max_target_cpus)
+ for_each_online_target_cpu_in_set(enable_clos_qos_config, NULL,
+ NULL, NULL, &status);
+ else
+ for_each_online_package_in_set(enable_clos_qos_config, NULL,
+ NULL, NULL, &status);
+ isst_ctdp_display_information_end(outf);
+}
+
+static void dump_clos_config_for_cpu(int cpu, void *arg1, void *arg2,
+ void *arg3, void *arg4)
+{
+ struct isst_clos_config clos_config;
+ int ret;
+
+ ret = isst_pm_get_clos(cpu, current_clos, &clos_config);
+ if (ret)
+ perror("isst_pm_get_clos");
+ else
+ isst_clos_display_information(cpu, outf, current_clos,
+ &clos_config);
+}
+
+static void dump_clos_config(void)
+{
+ if (cmd_help) {
+ fprintf(stderr,
+ "Print Intel Speed Select Technology core power configuration\n");
+ fprintf(stderr,
+ "\tArguments: [-c | --clos]: Specify clos id\n");
+ exit(0);
+ }
+ if (current_clos < 0 || current_clos > 3) {
+ fprintf(stderr, "Invalid clos id\n");
+ exit(0);
+ }
+
+ isst_ctdp_display_information_start(outf);
+ if (max_target_cpus)
+ for_each_online_target_cpu_in_set(dump_clos_config_for_cpu,
+ NULL, NULL, NULL, NULL);
+ else
+ for_each_online_package_in_set(dump_clos_config_for_cpu, NULL,
+ NULL, NULL, NULL);
+ isst_ctdp_display_information_end(outf);
+}
+
+static void set_clos_config_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
+ void *arg4)
+{
+ struct isst_clos_config clos_config;
+ int ret;
+
+ clos_config.pkg_id = get_physical_package_id(cpu);
+ clos_config.die_id = get_physical_die_id(cpu);
+
+ clos_config.epp = clos_epp;
+ clos_config.clos_prop_prio = clos_prop_prio;
+ clos_config.clos_min = clos_min;
+ clos_config.clos_max = clos_max;
+ clos_config.clos_desired = clos_desired;
+ ret = isst_set_clos(cpu, current_clos, &clos_config);
+ if (ret)
+ perror("isst_set_clos");
+ else
+ isst_display_result(cpu, outf, "core-power", "config", ret);
+}
+
+static void set_clos_config(void)
+{
+ if (cmd_help) {
+ fprintf(stderr,
+ "Set core-power configuration for one of the four clos ids\n");
+ fprintf(stderr,
+ "\tSpecify targeted clos id with [--clos|-c]\n");
+ fprintf(stderr, "\tSpecify clos EPP with [--epp|-e]\n");
+ fprintf(stderr,
+ "\tSpecify clos Proportional Priority [--weight|-w]\n");
+ fprintf(stderr, "\tSpecify clos min with [--min|-n]\n");
+ fprintf(stderr, "\tSpecify clos max with [--max|-m]\n");
+ fprintf(stderr, "\tSpecify clos desired with [--desired|-d]\n");
+ exit(0);
+ }
+
+ if (current_clos < 0 || current_clos > 3) {
+ fprintf(stderr, "Invalid clos id\n");
+ exit(0);
+ }
+ if (clos_epp < 0 || clos_epp > 0x0F) {
+ fprintf(stderr, "clos epp not specified or invalid, using default: 0\n");
+ clos_epp = 0;
+ }
+ if (clos_prop_prio < 0 || clos_prop_prio > 0x0F) {
+ fprintf(stderr,
+ "clos frequency weight not specified or invalid, using default: 0\n");
+ clos_prop_prio = 0;
+ }
+ if (clos_min < 0) {
+ fprintf(stderr, "clos min is not specified, default: 0\n");
+ clos_min = 0;
+ }
+ if (clos_max < 0) {
+ fprintf(stderr, "clos max is not specified, default: 0xff\n");
+ clos_max = 0xff;
+ }
+ if (clos_desired < 0) {
+ fprintf(stderr, "clos desired is not specified, default: 0\n");
+ clos_desired = 0x00;
+ }
+
+ isst_ctdp_display_information_start(outf);
+ if (max_target_cpus)
+ for_each_online_target_cpu_in_set(set_clos_config_for_cpu, NULL,
+ NULL, NULL, NULL);
+ else
+ for_each_online_package_in_set(set_clos_config_for_cpu, NULL,
+ NULL, NULL, NULL);
+ isst_ctdp_display_information_end(outf);
+}
+
+static void set_clos_assoc_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
+ void *arg4)
+{
+ int ret;
+
+ ret = isst_clos_associate(cpu, current_clos);
+ if (ret)
+ perror("isst_clos_associate");
+ else
+ isst_display_result(cpu, outf, "core-power", "assoc", ret);
+}
+
+static void set_clos_assoc(void)
+{
+ if (cmd_help) {
+ fprintf(stderr, "Associate a clos id to a CPU\n");
+ fprintf(stderr,
+ "\tSpecify targeted clos id with [--clos|-c]\n");
+ exit(0);
+ }
+
+ if (current_clos < 0 || current_clos > 3) {
+ fprintf(stderr, "Invalid clos id\n");
+ exit(0);
+ }
+ if (max_target_cpus)
+ for_each_online_target_cpu_in_set(set_clos_assoc_for_cpu, NULL,
+ NULL, NULL, NULL);
+ else {
+ fprintf(stderr,
+ "Invalid target cpu. Specify with [-c|--cpu]\n");
+ }
+}
+
+static void get_clos_assoc_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
+ void *arg4)
+{
+ int clos, ret;
+
+ ret = isst_clos_get_assoc_status(cpu, &clos);
+ if (ret)
+ perror("isst_clos_get_assoc_status");
+ else
+ isst_display_result(cpu, outf, "core-power", "get-assoc", clos);
+}
+
+static void get_clos_assoc(void)
+{
+ if (cmd_help) {
+ fprintf(stderr, "Get associate clos id to a CPU\n");
+ fprintf(stderr, "\tSpecify targeted cpu id with [--cpu|-c]\n");
+ exit(0);
+ }
+ if (max_target_cpus)
+ for_each_online_target_cpu_in_set(get_clos_assoc_for_cpu, NULL,
+ NULL, NULL, NULL);
+ else {
+ fprintf(stderr,
+ "Invalid target cpu. Specify with [-c|--cpu]\n");
+ }
+}
+
+static struct process_cmd_struct isst_cmds[] = {
+ { "perf-profile", "get-lock-status", get_tdp_locked },
+ { "perf-profile", "get-config-levels", get_tdp_levels },
+ { "perf-profile", "get-config-version", get_tdp_version },
+ { "perf-profile", "get-config-enabled", get_tdp_enabled },
+ { "perf-profile", "get-config-current-level", get_tdp_current_level },
+ { "perf-profile", "set-config-level", set_tdp_level },
+ { "perf-profile", "info", dump_isst_config },
+ { "base-freq", "info", dump_pbf_config },
+ { "base-freq", "enable", set_pbf_enable },
+ { "base-freq", "disable", set_pbf_disable },
+ { "turbo-freq", "info", dump_fact_config },
+ { "turbo-freq", "enable", set_fact_enable },
+ { "turbo-freq", "disable", set_fact_disable },
+ { "core-power", "info", dump_clos_config },
+ { "core-power", "enable", set_clos_enable },
+ { "core-power", "disable", set_clos_disable },
+ { "core-power", "config", set_clos_config },
+ { "core-power", "assoc", set_clos_assoc },
+ { "core-power", "get-assoc", get_clos_assoc },
+ { NULL, NULL, NULL }
+};
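+
+/*
+ * Dispatch example (illustrative invocation): "intel-speed-select -c 4
+ * core-power assoc -c 2" selects target CPU 4 via the global -c option,
+ * matches the { "core-power", "assoc", set_clos_assoc } entry above, and
+ * the second -c is then parsed by parse_cmd_args() as the clos id.
+ */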
+
+/*
+ * Parse a cpuset given with syntax like 1,2,4..6,8-10 and
+ * store the resulting CPU numbers in target_cpus[]
+ */
+void parse_cpu_command(char *optarg)
+{
+ unsigned int start, end;
+ char *next;
+
+ next = optarg;
+
+ while (next && *next) {
+ if (*next == '-') /* no negative cpu numbers */
+ goto error;
+
+ start = strtoul(next, &next, 10);
+
+ if (max_target_cpus < MAX_CPUS_IN_ONE_REQ)
+ target_cpus[max_target_cpus++] = start;
+
+ if (*next == '\0')
+ break;
+
+ if (*next == ',') {
+ next += 1;
+ continue;
+ }
+
+ if (*next == '-') {
+ next += 1; /* start range */
+ } else if (*next == '.') {
+ next += 1;
+ if (*next == '.')
+ next += 1; /* start range */
+ else
+ goto error;
+ }
+
+ end = strtoul(next, &next, 10);
+ if (end <= start)
+ goto error;
+
+ while (++start <= end) {
+ if (max_target_cpus < MAX_CPUS_IN_ONE_REQ)
+ target_cpus[max_target_cpus++] = start;
+ }
+
+ if (*next == ',')
+ next += 1;
+ else if (*next != '\0')
+ goto error;
+ }
+
+#ifdef DEBUG
+ {
+ int i;
+
+ for (i = 0; i < max_target_cpus; ++i)
+ printf("cpu [%d] in arg\n", target_cpus[i]);
+ }
+#endif
+ return;
+
+error:
+ fprintf(stderr, "\"--cpu %s\" malformed\n", optarg);
+ exit(-1);
+}
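+
+/*
+ * Example (illustrative): "--cpu 1,2,4..6,8-10" fills target_cpus[] with
+ * 1, 2, 4, 5, 6, 8, 9, 10 and sets max_target_cpus to 8; both ".." and
+ * "-" denote a range.
+ */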
+
+static void parse_cmd_args(int argc, int start, char **argv)
+{
+ int opt;
+ int option_index;
+
+ static struct option long_options[] = {
+ { "bucket", required_argument, 0, 'b' },
+ { "level", required_argument, 0, 'l' },
+ { "trl-type", required_argument, 0, 'r' },
+ { "trl", required_argument, 0, 't' },
+ { "help", no_argument, 0, 'h' },
+ { "clos", required_argument, 0, 'c' },
+ { "desired", required_argument, 0, 'd' },
+ { "epp", required_argument, 0, 'e' },
+ { "min", required_argument, 0, 'n' },
+ { "max", required_argument, 0, 'm' },
+ { "priority", required_argument, 0, 'p' },
+ { "weight", required_argument, 0, 'w' },
+ { 0, 0, 0, 0 }
+ };
+
+ option_index = start;
+
+ optind = start + 1;
+ while ((opt = getopt_long(argc, argv, "b:l:t:c:d:e:n:m:p:r:w:h",
+ long_options, &option_index)) != -1) {
+ switch (opt) {
+ case 'b':
+ fact_bucket = atoi(optarg);
+ break;
+ case 'h':
+ cmd_help = 1;
+ break;
+ case 'l':
+ tdp_level = atoi(optarg);
+ break;
+ case 't':
+ sscanf(optarg, "0x%llx", &fact_trl);
+ break;
+ case 'r':
+ if (!strncmp(optarg, "sse", 3)) {
+ fact_avx = 0x01;
+ } else if (!strncmp(optarg, "avx2", 4)) {
+ fact_avx = 0x02;
+ } else if (!strncmp(optarg, "avx512", 4)) {
+ fact_avx = 0x04;
+ } else {
+ fprintf(outf, "Invalid sse,avx options\n");
+ exit(1);
+ }
+ break;
+ /* CLOS related */
+ case 'c':
+ current_clos = atoi(optarg);
+ printf("clos %d\n", current_clos);
+ break;
+ case 'd':
+ clos_desired = atoi(optarg);
+ break;
+ case 'e':
+ clos_epp = atoi(optarg);
+ break;
+ case 'n':
+ clos_min = atoi(optarg);
+ break;
+ case 'm':
+ clos_max = atoi(optarg);
+ break;
+ case 'p':
+ clos_priority_type = atoi(optarg);
+ break;
+ case 'w':
+ clos_prop_prio = atoi(optarg);
+ break;
+ default:
+ printf("no match\n");
+ }
+ }
+}
+
+static void isst_help(void)
+{
+ printf("perf-profile:\tAn architectural mechanism that allows multiple optimized \n\
+ performance profiles per system via static and/or dynamic\n\
+ adjustment of core count, workload, Tjmax, and\n\
+ TDP, etc.\n");
+ printf("\nCommands : For feature=perf-profile\n");
+ printf("\tinfo\n");
+ printf("\tget-lock-status\n");
+ printf("\tget-config-levels\n");
+ printf("\tget-config-version\n");
+ printf("\tget-config-enabled\n");
+ printf("\tget-config-current-level\n");
+ printf("\tset-config-level\n");
+}
+
+static void pbf_help(void)
+{
+ printf("base-freq:\tEnables users to increase guaranteed base frequency\n\
+ on certain cores (high priority cores) in exchange for lower\n\
+ base frequency on remaining cores (low priority cores).\n");
+ printf("\tcommand : info\n");
+ printf("\tcommand : enable\n");
+ printf("\tcommand : disable\n");
+}
+
+static void fact_help(void)
+{
+ printf("turbo-freq:\tEnables the ability to set different turbo ratio\n\
+ limits to cores based on priority.\n");
+ printf("\nCommand: For feature=turbo-freq\n");
+ printf("\tcommand : info\n");
+ printf("\tcommand : enable\n");
+ printf("\tcommand : disable\n");
+}
+
+static void core_power_help(void)
+{
+ printf("core-power:\tInterface that allows user to define per core/tile\n\
+ priority.\n");
+ printf("\nCommands : For feature=core-power\n");
+ printf("\tinfo\n");
+ printf("\tenable\n");
+ printf("\tdisable\n");
+ printf("\tconfig\n");
+ printf("\tassoc\n");
+ printf("\tget-assoc\n");
+}
+
+struct process_cmd_help_struct {
+ char *feature;
+ void (*process_fn)(void);
+};
+
+static struct process_cmd_help_struct isst_help_cmds[] = {
+ { "perf-profile", isst_help },
+ { "base-freq", pbf_help },
+ { "turbo-freq", fact_help },
+ { "core-power", core_power_help },
+ { NULL, NULL }
+};
+
+void process_command(int argc, char **argv)
+{
+ int i = 0, matched = 0;
+ char *feature = argv[optind];
+ char *cmd = argv[optind + 1];
+
+ if (!feature || !cmd)
+ return;
+
+ debug_printf("feature name [%s] command [%s]\n", feature, cmd);
+ if (!strcmp(cmd, "-h") || !strcmp(cmd, "--help")) {
+ while (isst_help_cmds[i].feature) {
+ if (!strcmp(isst_help_cmds[i].feature, feature)) {
+ isst_help_cmds[i].process_fn();
+ exit(0);
+ }
+ ++i;
+ }
+ }
+
+ create_cpu_map();
+
+ i = 0;
+ while (isst_cmds[i].feature) {
+ if (!strcmp(isst_cmds[i].feature, feature) &&
+ !strcmp(isst_cmds[i].command, cmd)) {
+ parse_cmd_args(argc, optind + 1, argv);
+ isst_cmds[i].process_fn();
+ matched = 1;
+ break;
+ }
+ ++i;
+ }
+
+ if (!matched)
+ fprintf(stderr, "Invalid command\n");
+}
+
+static void usage(void)
+{
+ printf("Intel(R) Speed Select Technology\n");
+ printf("\nUsage:\n");
+ printf("intel-speed-select [OPTIONS] FEATURE COMMAND COMMAND_ARGUMENTS\n");
+ printf("\nUse this tool to enumerate and control the Intel Speed Select Technology features,\n");
+ printf("\nFEATURE : [perf-profile|base-freq|turbo-freq|core-power]\n");
+ printf("\nFor help on each feature, use --h|--help\n");
+ printf("\tFor example: intel-speed-select perf-profile -h\n");
+
+ printf("\nFor additional help on each command for a feature, use --h|--help\n");
+ printf("\tFor example: intel-speed-select perf-profile get-lock-status -h\n");
+ printf("\t\t This will print help for the command \"get-lock-status\" for the feature \"perf-profile\"\n");
+
+ printf("\nOPTIONS\n");
+ printf("\t[-c|--cpu] : logical cpu number\n");
+ printf("\t\tDefault: Die scoped for all dies in the system with multiple dies/package\n");
+ printf("\t\t\t Or Package scoped for all Packages when each package contains one die\n");
+ printf("\t[-d|--debug] : Debug mode\n");
+ printf("\t[-h|--help] : Print help\n");
+ printf("\t[-i|--info] : Print platform information\n");
+ printf("\t[-o|--out] : Output file\n");
+ printf("\t\t\tDefault : stderr\n");
+ printf("\t[-f|--format] : output format [json|text]. Default: text\n");
+ printf("\t[-v|--version] : Print version\n");
+
+ printf("\nResult format\n");
+ printf("\tResult display uses a common format for each command:\n");
+ printf("\tResults are formatted in text/JSON with\n");
+ printf("\t\tPackage, Die, CPU, and command specific results.\n");
+ printf("\t\t\tFor Set commands, status is 0 for success and rest for failures\n");
+ exit(1);
+}
+
+static void print_version(void)
+{
+ fprintf(outf, "Version %s\n", version_str);
+ fprintf(outf, "Build date %s time %s\n", __DATE__, __TIME__);
+ exit(0);
+}
+
+static void cmdline(int argc, char **argv)
+{
+ int opt;
+ int option_index = 0;
+
+ static struct option long_options[] = {
+ { "cpu", required_argument, 0, 'c' },
+ { "debug", no_argument, 0, 'd' },
+ { "format", required_argument, 0, 'f' },
+ { "help", no_argument, 0, 'h' },
+ { "info", no_argument, 0, 'i' },
+ { "out", required_argument, 0, 'o' },
+ { "version", no_argument, 0, 'v' },
+ { 0, 0, 0, 0 }
+ };
+
+ progname = argv[0];
+ while ((opt = getopt_long_only(argc, argv, "+c:df:hio:v", long_options,
+ &option_index)) != -1) {
+ switch (opt) {
+ case 'c':
+ parse_cpu_command(optarg);
+ break;
+ case 'd':
+ debug_flag = 1;
+ printf("Debug Mode ON\n");
+ break;
+ case 'f':
+ if (!strncmp(optarg, "json", 4))
+ out_format_json = 1;
+ break;
+ case 'h':
+ usage();
+ break;
+ case 'i':
+ isst_print_platform_information();
+ break;
+ case 'o':
+ if (outf)
+ fclose(outf);
+ outf = fopen_or_exit(optarg, "w");
+ break;
+ case 'v':
+ print_version();
+ break;
+ default:
+ usage();
+ }
+ }
+
+ if (geteuid() != 0) {
+ fprintf(stderr, "Must run as root\n");
+ exit(0);
+ }
+
+ if (optind > (argc - 2)) {
+ fprintf(stderr, "Feature name and|or command not specified\n");
+ exit(0);
+ }
+ update_cpu_model();
+ printf("Intel(R) Speed Select Technology\n");
+ printf("Executing on CPU model:%d[0x%x]\n", cpu_model, cpu_model);
+ set_max_cpu_num();
+ set_cpu_present_cpu_mask();
+ set_cpu_target_cpu_mask();
+ isst_fill_platform_info();
+ if (isst_platform_info.api_version > supported_api_ver) {
+ printf("Incompatible API versions; Upgrade of tool is required\n");
+ exit(0);
+ }
+
+ process_command(argc, argv);
+}
+
+int main(int argc, char **argv)
+{
+ outf = stderr;
+ cmdline(argc, argv);
+ return 0;
+}
diff --git a/tools/power/x86/intel-speed-select/isst-core.c b/tools/power/x86/intel-speed-select/isst-core.c
new file mode 100644
index 000000000000..8de4ac39a008
--- /dev/null
+++ b/tools/power/x86/intel-speed-select/isst-core.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Speed Select -- Enumerate and control features
+ * Copyright (c) 2019 Intel Corporation.
+ */
+
+#include "isst.h"
+
+int isst_get_ctdp_levels(int cpu, struct isst_pkg_ctdp *pkg_dev)
+{
+ unsigned int resp;
+ int ret;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP,
+ CONFIG_TDP_GET_LEVELS_INFO, 0, 0, &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d CONFIG_TDP_GET_LEVELS_INFO resp:%x\n", cpu, resp);
+
+ pkg_dev->version = resp & 0xff;
+ pkg_dev->levels = (resp >> 8) & 0xff;
+ pkg_dev->current_level = (resp >> 16) & 0xff;
+ pkg_dev->locked = !!(resp & BIT(24));
+ pkg_dev->enabled = !!(resp & BIT(31));
+
+ return 0;
+}
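+
+/*
+ * Example decode (illustrative value, not from any spec): resp 0x80000203
+ * yields version 3, levels 2, current_level 0, locked 0, enabled 1 with
+ * the bit layout above.
+ */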
+
+int isst_get_ctdp_control(int cpu, int config_index,
+ struct isst_pkg_ctdp_level_info *ctdp_level)
+{
+ unsigned int resp;
+ int ret;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP,
+ CONFIG_TDP_GET_TDP_CONTROL, 0,
+ config_index, &resp);
+ if (ret)
+ return ret;
+
+ ctdp_level->fact_support = resp & BIT(0);
+ ctdp_level->pbf_support = !!(resp & BIT(1));
+ ctdp_level->fact_enabled = !!(resp & BIT(16));
+ ctdp_level->pbf_enabled = !!(resp & BIT(17));
+
+ debug_printf(
+ "cpu:%d CONFIG_TDP_GET_TDP_CONTROL resp:%x fact_support:%d pbf_support: %d fact_enabled:%d pbf_enabled:%d\n",
+ cpu, resp, ctdp_level->fact_support, ctdp_level->pbf_support,
+ ctdp_level->fact_enabled, ctdp_level->pbf_enabled);
+
+ return 0;
+}
+
+int isst_get_tdp_info(int cpu, int config_index,
+ struct isst_pkg_ctdp_level_info *ctdp_level)
+{
+ unsigned int resp;
+ int ret;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP, CONFIG_TDP_GET_TDP_INFO,
+ 0, config_index, &resp);
+ if (ret)
+ return ret;
+
+ ctdp_level->pkg_tdp = resp & GENMASK(14, 0);
+ ctdp_level->tdp_ratio = (resp & GENMASK(23, 16)) >> 16;
+
+ debug_printf(
+ "cpu:%d ctdp:%d CONFIG_TDP_GET_TDP_INFO resp:%x tdp_ratio:%d pkg_tdp:%d\n",
+ cpu, config_index, resp, ctdp_level->tdp_ratio,
+ ctdp_level->pkg_tdp);
+ return 0;
+}
+
+int isst_get_pwr_info(int cpu, int config_index,
+ struct isst_pkg_ctdp_level_info *ctdp_level)
+{
+ unsigned int resp;
+ int ret;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP, CONFIG_TDP_GET_PWR_INFO,
+ 0, config_index, &resp);
+ if (ret)
+ return ret;
+
+ ctdp_level->pkg_max_power = resp & GENMASK(14, 0);
+ ctdp_level->pkg_min_power = (resp & GENMASK(30, 16)) >> 16;
+
+ debug_printf(
+ "cpu:%d ctdp:%d CONFIG_TDP_GET_PWR_INFO resp:%x pkg_max_power:%d pkg_min_power:%d\n",
+ cpu, config_index, resp, ctdp_level->pkg_max_power,
+ ctdp_level->pkg_min_power);
+
+ return 0;
+}
+
+int isst_get_tjmax_info(int cpu, int config_index,
+ struct isst_pkg_ctdp_level_info *ctdp_level)
+{
+ unsigned int resp;
+ int ret;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP, CONFIG_TDP_GET_TJMAX_INFO,
+ 0, config_index, &resp);
+ if (ret)
+ return ret;
+
+ ctdp_level->t_proc_hot = resp & GENMASK(7, 0);
+
+ debug_printf(
+ "cpu:%d ctdp:%d CONFIG_TDP_GET_TJMAX_INFO resp:%x t_proc_hot:%d\n",
+ cpu, config_index, resp, ctdp_level->t_proc_hot);
+
+ return 0;
+}
+
+int isst_get_coremask_info(int cpu, int config_index,
+ struct isst_pkg_ctdp_level_info *ctdp_level)
+{
+ unsigned int resp;
+ int i, ret;
+
+ ctdp_level->cpu_count = 0;
+ for (i = 0; i < 2; ++i) {
+ unsigned long long mask;
+ int cpu_count = 0;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP,
+ CONFIG_TDP_GET_CORE_MASK, 0,
+ (i << 8) | config_index, &resp);
+ if (ret)
+ return ret;
+
+ debug_printf(
+ "cpu:%d ctdp:%d mask:%d CONFIG_TDP_GET_CORE_MASK resp:%x\n",
+ cpu, config_index, i, resp);
+
+ mask = (unsigned long long)resp << (32 * i);
+ set_cpu_mask_from_punit_coremask(cpu, mask,
+ ctdp_level->core_cpumask_size,
+ ctdp_level->core_cpumask,
+ &cpu_count);
+ ctdp_level->cpu_count += cpu_count;
+ debug_printf("cpu:%d ctdp:%d mask:%d cpu count:%d\n", cpu,
+ config_index, i, ctdp_level->cpu_count);
+ }
+
+ return 0;
+}
+
+int isst_get_get_trl(int cpu, int level, int avx_level, int *trl)
+{
+ unsigned int req, resp;
+ int ret;
+
+ req = level | (avx_level << 16);
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP,
+ CONFIG_TDP_GET_TURBO_LIMIT_RATIOS, 0, req,
+ &resp);
+ if (ret)
+ return ret;
+
+ debug_printf(
+ "cpu:%d CONFIG_TDP_GET_TURBO_LIMIT_RATIOS req:%x resp:%x\n",
+ cpu, req, resp);
+
+ trl[0] = resp & GENMASK(7, 0);
+ trl[1] = (resp & GENMASK(15, 8)) >> 8;
+ trl[2] = (resp & GENMASK(23, 16)) >> 16;
+ trl[3] = (resp & GENMASK(31, 24)) >> 24;
+
+ req = level | BIT(8) | (avx_level << 16);
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP,
+ CONFIG_TDP_GET_TURBO_LIMIT_RATIOS, 0, req,
+ &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d CONFIG_TDP_GET_TURBO_LIMIT req:%x resp:%x\n", cpu,
+ req, resp);
+
+ trl[4] = resp & GENMASK(7, 0);
+ trl[5] = (resp & GENMASK(15, 8)) >> 8;
+ trl[6] = (resp & GENMASK(23, 16)) >> 16;
+ trl[7] = (resp & GENMASK(31, 24)) >> 24;
+
+ return 0;
+}
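+
+/*
+ * Example decode (illustrative): a first resp of 0x28282a2c gives
+ * trl[0]=0x2c (44), trl[1]=0x2a (42), trl[2]=trl[3]=0x28 (40); each
+ * entry is a turbo ratio that the display code scales by 100 MHz.
+ */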
+
+int isst_set_tdp_level_msr(int cpu, int tdp_level)
+{
+ unsigned long long level = tdp_level;
+ int ret;
+
+ debug_printf("cpu:%d tdp_level:%d via MSR\n", cpu, tdp_level);
+
+ if (isst_get_config_tdp_lock_status(cpu)) {
+ debug_printf("cpu: tdp_locked %d\n", cpu);
+ return -1;
+ }
+
+ if (tdp_level > 2)
+ return -1; /* invalid value */
+
+ /* Use a 64-bit local; casting &tdp_level (an int) would over-read */
+ ret = isst_send_msr_command(cpu, 0x64b, 1, &level);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu: tdp_level via MSR successful %d\n", cpu, tdp_level);
+
+ return 0;
+}
+
+int isst_set_tdp_level(int cpu, int tdp_level)
+{
+ unsigned int resp;
+ int ret;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP, CONFIG_TDP_SET_LEVEL, 0,
+ tdp_level, &resp);
+ if (ret)
+ return isst_set_tdp_level_msr(cpu, tdp_level);
+
+ return 0;
+}
+
+int isst_get_pbf_info(int cpu, int level, struct isst_pbf_info *pbf_info)
+{
+ unsigned int req, resp;
+ int i, ret;
+
+ pbf_info->core_cpumask_size = alloc_cpu_set(&pbf_info->core_cpumask);
+
+ for (i = 0; i < 2; ++i) {
+ unsigned long long mask;
+ int count;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP,
+ CONFIG_TDP_PBF_GET_CORE_MASK_INFO,
+ 0, (i << 8) | level, &resp);
+ if (ret)
+ return ret;
+
+ debug_printf(
+ "cpu:%d CONFIG_TDP_PBF_GET_CORE_MASK_INFO resp:%x\n",
+ cpu, resp);
+
+ mask = (unsigned long long)resp << (32 * i);
+ set_cpu_mask_from_punit_coremask(cpu, mask,
+ pbf_info->core_cpumask_size,
+ pbf_info->core_cpumask,
+ &count);
+ }
+
+ req = level;
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP,
+ CONFIG_TDP_PBF_GET_P1HI_P1LO_INFO, 0, req,
+ &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d CONFIG_TDP_PBF_GET_P1HI_P1LO_INFO resp:%x\n", cpu,
+ resp);
+
+ pbf_info->p1_low = resp & 0xff;
+ pbf_info->p1_high = (resp & GENMASK(15, 8)) >> 8;
+
+ req = level;
+ ret = isst_send_mbox_command(
+ cpu, CONFIG_TDP, CONFIG_TDP_PBF_GET_TDP_INFO, 0, req, &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d CONFIG_TDP_PBF_GET_TDP_INFO resp:%x\n", cpu, resp);
+
+ pbf_info->tdp = resp & 0xffff;
+
+ req = level;
+ ret = isst_send_mbox_command(
+ cpu, CONFIG_TDP, CONFIG_TDP_PBF_GET_TJ_MAX_INFO, 0, req, &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d CONFIG_TDP_PBF_GET_TJ_MAX_INFO resp:%x\n", cpu,
+ resp);
+ pbf_info->t_control = (resp >> 8) & 0xff;
+ pbf_info->t_prochot = resp & 0xff;
+
+ return 0;
+}
+
+void isst_get_pbf_info_complete(struct isst_pbf_info *pbf_info)
+{
+ free_cpu_set(pbf_info->core_cpumask);
+}
+
+int isst_set_pbf_fact_status(int cpu, int pbf, int enable)
+{
+ struct isst_pkg_ctdp pkg_dev;
+ struct isst_pkg_ctdp_level_info ctdp_level;
+ int current_level;
+ unsigned int req = 0, resp;
+ int ret;
+
+ ret = isst_get_ctdp_levels(cpu, &pkg_dev);
+ if (ret)
+ return ret;
+
+ current_level = pkg_dev.current_level;
+
+ ret = isst_get_ctdp_control(cpu, current_level, &ctdp_level);
+ if (ret)
+ return ret;
+
+ if (pbf) {
+ if (ctdp_level.fact_enabled)
+ req = BIT(16);
+
+ if (enable)
+ req |= BIT(17);
+ else
+ req &= ~BIT(17);
+ } else {
+ if (ctdp_level.pbf_enabled)
+ req = BIT(17);
+
+ if (enable)
+ req |= BIT(16);
+ else
+ req &= ~BIT(16);
+ }
+
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP,
+ CONFIG_TDP_SET_TDP_CONTROL, 0, req, &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d CONFIG_TDP_SET_TDP_CONTROL pbf/fact:%d req:%x\n",
+ cpu, pbf, req);
+
+ return 0;
+}
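+
+/*
+ * Example (illustrative): enabling PBF (pbf=1, enable=1) while FACT is
+ * already enabled builds req = BIT(16) | BIT(17) = 0x30000, preserving
+ * the FACT state while turning PBF on.
+ */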
+
+int isst_get_fact_bucket_info(int cpu, int level,
+ struct isst_fact_bucket_info *bucket_info)
+{
+ unsigned int resp;
+ int i, k, ret;
+
+ for (i = 0; i < 2; ++i) {
+ int j;
+
+ ret = isst_send_mbox_command(
+ cpu, CONFIG_TDP,
+ CONFIG_TDP_GET_FACT_HP_TURBO_LIMIT_NUMCORES, 0,
+ (i << 8) | level, &resp);
+ if (ret)
+ return ret;
+
+ debug_printf(
+ "cpu:%d CONFIG_TDP_GET_FACT_HP_TURBO_LIMIT_NUMCORES index:%d level:%d resp:%x\n",
+ cpu, i, level, resp);
+
+ for (j = 0; j < 4; ++j) {
+ bucket_info[j + (i * 4)].high_priority_cores_count =
+ (resp >> (j * 8)) & 0xff;
+ }
+ }
+
+ for (k = 0; k < 3; ++k) {
+ for (i = 0; i < 2; ++i) {
+ int j;
+
+ ret = isst_send_mbox_command(
+ cpu, CONFIG_TDP,
+ CONFIG_TDP_GET_FACT_HP_TURBO_LIMIT_RATIOS, 0,
+ (k << 16) | (i << 8) | level, &resp);
+ if (ret)
+ return ret;
+
+ debug_printf(
+ "cpu:%d CONFIG_TDP_GET_FACT_HP_TURBO_LIMIT_RATIOS index:%d level:%d avx:%d resp:%x\n",
+ cpu, i, level, k, resp);
+
+ for (j = 0; j < 4; ++j) {
+ switch (k) {
+ case 0:
+ bucket_info[j + (i * 4)].sse_trl =
+ (resp >> (j * 8)) & 0xff;
+ break;
+ case 1:
+ bucket_info[j + (i * 4)].avx_trl =
+ (resp >> (j * 8)) & 0xff;
+ break;
+ case 2:
+ bucket_info[j + (i * 4)].avx512_trl =
+ (resp >> (j * 8)) & 0xff;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+int isst_get_fact_info(int cpu, int level, struct isst_fact_info *fact_info)
+{
+ unsigned int resp;
+ int ret;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP,
+ CONFIG_TDP_GET_FACT_LP_CLIPPING_RATIO, 0,
+ level, &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d CONFIG_TDP_GET_FACT_LP_CLIPPING_RATIO resp:%x\n",
+ cpu, resp);
+
+ fact_info->lp_clipping_ratio_license_sse = resp & 0xff;
+ fact_info->lp_clipping_ratio_license_avx2 = (resp >> 8) & 0xff;
+ fact_info->lp_clipping_ratio_license_avx512 = (resp >> 16) & 0xff;
+
+ ret = isst_get_fact_bucket_info(cpu, level, fact_info->bucket_info);
+
+ return ret;
+}
+
+int isst_set_trl(int cpu, unsigned long long trl)
+{
+ int ret;
+
+ if (!trl)
+ trl = 0xFFFFFFFFFFFFFFFFULL;
+
+ ret = isst_send_msr_command(cpu, 0x1AD, 1, &trl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int isst_set_trl_from_current_tdp(int cpu, unsigned long long trl)
+{
+ unsigned long long msr_trl;
+ int ret;
+
+ if (trl) {
+ msr_trl = trl;
+ } else {
+ struct isst_pkg_ctdp pkg_dev;
+ int trl[8];
+ int i;
+
+ ret = isst_get_ctdp_levels(cpu, &pkg_dev);
+ if (ret)
+ return ret;
+
+ ret = isst_get_get_trl(cpu, pkg_dev.current_level, 0, trl);
+ if (ret)
+ return ret;
+
+ msr_trl = 0;
+ for (i = 0; i < 8; ++i) {
+ unsigned long long _trl = trl[i];
+
+ msr_trl |= (_trl << (i * 8));
+ }
+ }
+ ret = isst_send_msr_command(cpu, 0x1AD, 1, &msr_trl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
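+
+/*
+ * Packing example (illustrative): trl[] = {44, 42, 40, 40, 38, 38, 36, 36}
+ * is packed byte-wise into MSR 0x1AD as msr_trl = 0x2424262628282a2c
+ * (trl[0] in the lowest byte).
+ */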
+
+/* Return 1 if locked */
+int isst_get_config_tdp_lock_status(int cpu)
+{
+ unsigned long long tdp_control = 0;
+ int ret;
+
+ ret = isst_send_msr_command(cpu, 0x64b, 0, &tdp_control);
+ if (ret)
+ return ret;
+
+ ret = !!(tdp_control & BIT(31));
+
+ return ret;
+}
+
+void isst_get_process_ctdp_complete(int cpu, struct isst_pkg_ctdp *pkg_dev)
+{
+ int i;
+
+ if (!pkg_dev->processed)
+ return;
+
+ for (i = 0; i < pkg_dev->levels; ++i) {
+ struct isst_pkg_ctdp_level_info *ctdp_level;
+
+ ctdp_level = &pkg_dev->ctdp_level[i];
+ if (ctdp_level->pbf_support)
+ free_cpu_set(ctdp_level->pbf_info.core_cpumask);
+ free_cpu_set(ctdp_level->core_cpumask);
+ }
+}
+
+int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev)
+{
+ int i, ret;
+
+ if (pkg_dev->processed)
+ return 0;
+
+ ret = isst_get_ctdp_levels(cpu, pkg_dev);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu: %d ctdp enable:%d current level: %d levels:%d\n",
+ cpu, pkg_dev->enabled, pkg_dev->current_level,
+ pkg_dev->levels);
+
+ for (i = 0; i <= pkg_dev->levels; ++i) {
+ struct isst_pkg_ctdp_level_info *ctdp_level;
+
+ if (tdp_level != 0xff && i != tdp_level)
+ continue;
+
+ debug_printf("cpu:%d Get Information for TDP level:%d\n", cpu,
+ i);
+ ctdp_level = &pkg_dev->ctdp_level[i];
+
+ ctdp_level->processed = 1;
+ ctdp_level->level = i;
+ ctdp_level->control_cpu = cpu;
+ ctdp_level->pkg_id = get_physical_package_id(cpu);
+ ctdp_level->die_id = get_physical_die_id(cpu);
+
+ ret = isst_get_ctdp_control(cpu, i, ctdp_level);
+ if (ret)
+ return ret;
+
+ ret = isst_get_tdp_info(cpu, i, ctdp_level);
+ if (ret)
+ return ret;
+
+ ret = isst_get_pwr_info(cpu, i, ctdp_level);
+ if (ret)
+ return ret;
+
+ ret = isst_get_tjmax_info(cpu, i, ctdp_level);
+ if (ret)
+ return ret;
+
+ ctdp_level->core_cpumask_size =
+ alloc_cpu_set(&ctdp_level->core_cpumask);
+ ret = isst_get_coremask_info(cpu, i, ctdp_level);
+ if (ret)
+ return ret;
+
+ ret = isst_get_get_trl(cpu, i, 0,
+ ctdp_level->trl_sse_active_cores);
+ if (ret)
+ return ret;
+
+ ret = isst_get_get_trl(cpu, i, 1,
+ ctdp_level->trl_avx_active_cores);
+ if (ret)
+ return ret;
+
+ ret = isst_get_get_trl(cpu, i, 2,
+ ctdp_level->trl_avx_512_active_cores);
+ if (ret)
+ return ret;
+
+ if (ctdp_level->pbf_support) {
+ ret = isst_get_pbf_info(cpu, i, &ctdp_level->pbf_info);
+ if (!ret)
+ ctdp_level->pbf_found = 1;
+ }
+
+ if (ctdp_level->fact_support) {
+ ret = isst_get_fact_info(cpu, i,
+ &ctdp_level->fact_info);
+ if (ret)
+ return ret;
+ }
+ }
+
+ pkg_dev->processed = 1;
+
+ return 0;
+}
+
+int isst_pm_qos_config(int cpu, int enable_clos, int priority_type)
+{
+ unsigned int req, resp;
+ int ret;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG, 0, 0,
+ &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d CLOS_PM_QOS_CONFIG resp:%x\n", cpu, resp);
+
+ req = resp;
+
+ if (enable_clos)
+ req = req | BIT(1);
+ else
+ req = req & ~BIT(1);
+
+ if (priority_type)
+ req = req | BIT(2);
+ else
+ req = req & ~BIT(2);
+
+ ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG,
+ BIT(MBOX_CMD_WRITE_BIT), req, &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d CLOS_PM_QOS_CONFIG priority type:%d req:%x\n", cpu,
+ priority_type, req);
+
+ return 0;
+}
+
+int isst_pm_get_clos(int cpu, int clos, struct isst_clos_config *clos_config)
+{
+ unsigned int resp;
+ int ret;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_CLOS, clos, 0,
+ &resp);
+ if (ret)
+ return ret;
+
+ clos_config->pkg_id = get_physical_package_id(cpu);
+ clos_config->die_id = get_physical_die_id(cpu);
+
+ clos_config->epp = resp & 0x0f;
+ clos_config->clos_prop_prio = (resp >> 4) & 0x0f;
+ clos_config->clos_min = (resp >> 8) & 0xff;
+ clos_config->clos_max = (resp >> 16) & 0xff;
+ clos_config->clos_desired = (resp >> 24) & 0xff;
+
+ return 0;
+}
+
+int isst_set_clos(int cpu, int clos, struct isst_clos_config *clos_config)
+{
+ unsigned int req, resp;
+ unsigned int param;
+ int ret;
+
+ req = clos_config->epp & 0x0f;
+ req |= (clos_config->clos_prop_prio & 0x0f) << 4;
+ req |= (clos_config->clos_min & 0xff) << 8;
+ req |= (clos_config->clos_max & 0xff) << 16;
+ req |= (clos_config->clos_desired & 0xff) << 24;
+
+ param = BIT(MBOX_CMD_WRITE_BIT) | clos;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_CLOS, param, req,
+ &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d CLOS_PM_CLOS param:%x req:%x\n", cpu, param, req);
+
+ return 0;
+}
+
+int isst_clos_get_assoc_status(int cpu, int *clos_id)
+{
+ unsigned int resp;
+ unsigned int param;
+ int core_id, ret;
+
+ core_id = find_phy_core_num(cpu);
+ param = core_id;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PQR_ASSOC, param, 0,
+ &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d CLOS_PQR_ASSOC param:%x resp:%x\n", cpu, param,
+ resp);
+ *clos_id = (resp >> 16) & 0x03;
+
+ return 0;
+}
+
+int isst_clos_associate(int cpu, int clos_id)
+{
+ unsigned int req, resp;
+ unsigned int param;
+ int core_id, ret;
+
+ req = (clos_id & 0x03) << 16;
+ core_id = find_phy_core_num(cpu);
+ param = BIT(MBOX_CMD_WRITE_BIT) | core_id;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PQR_ASSOC, param,
+ req, &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d CLOS_PQR_ASSOC param:%x req:%x\n", cpu, param,
+ req);
+
+ return 0;
+}
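+
+/*
+ * Example (illustrative): associating clos id 2 with a CPU whose punit
+ * core id is 5 sends param = BIT(MBOX_CMD_WRITE_BIT) | 5 and
+ * req = 2 << 16 = 0x20000; the read path above extracts the clos id
+ * from the same bits 17:16 of the response.
+ */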
diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
new file mode 100644
index 000000000000..f368b8323742
--- /dev/null
+++ b/tools/power/x86/intel-speed-select/isst-display.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel dynamic_speed_select -- Enumerate and control features
+ * Copyright (c) 2019 Intel Corporation.
+ */
+
+#include "isst.h"
+
+#define DISP_FREQ_MULTIPLIER 100000
+
+static void printcpumask(int str_len, char *str, int mask_size,
+ cpu_set_t *cpu_mask)
+{
+ int i, max_cpus = get_topo_max_cpus();
+ unsigned int *mask;
+ int size, index, curr_index;
+
+ size = max_cpus / (sizeof(unsigned int) * 8);
+ if (max_cpus % (sizeof(unsigned int) * 8))
+ size++;
+
+ mask = calloc(size, sizeof(unsigned int));
+ if (!mask)
+ return;
+
+ for (i = 0; i < max_cpus; ++i) {
+ int mask_index, bit_index;
+
+ if (!CPU_ISSET_S(i, mask_size, cpu_mask))
+ continue;
+
+ mask_index = i / (sizeof(unsigned int) * 8);
+ bit_index = i % (sizeof(unsigned int) * 8);
+ mask[mask_index] |= BIT(bit_index);
+ }
+
+ curr_index = 0;
+ for (i = size - 1; i >= 0; --i) {
+ index = snprintf(&str[curr_index], str_len - curr_index, "%08x",
+ mask[i]);
+ curr_index += index;
+ if (i) {
+ strncat(&str[curr_index], ",", str_len - curr_index);
+ curr_index++;
+ }
+ }
+
+ free(mask);
+}
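+
+/*
+ * Output example (illustrative): on a 64-CPU system with CPUs 0-7 set,
+ * the function above prints the words most significant first:
+ * "00000000,000000ff".
+ */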
+
+static void format_and_print_txt(FILE *outf, int level, char *header,
+ char *value)
+{
+ char *spaces = " ";
+ static char delimiters[256];
+ int i, j = 0;
+
+ if (!level)
+ return;
+
+ if (level == 1) {
+ strcpy(delimiters, " ");
+ } else {
+ for (i = 0; i < level - 1; ++i)
+ j += snprintf(&delimiters[j], sizeof(delimiters) - j,
+ "%s", spaces);
+ }
+
+ if (header && value) {
+ fprintf(outf, "%s", delimiters);
+ fprintf(outf, "%s:%s\n", header, value);
+ } else if (header) {
+ fprintf(outf, "%s", delimiters);
+ fprintf(outf, "%s\n", header);
+ }
+}
+
+static int last_level;
+static void format_and_print(FILE *outf, int level, char *header, char *value)
+{
+ char *spaces = " ";
+ static char delimiters[256];
+ int i;
+
+ if (!out_format_is_json()) {
+ format_and_print_txt(outf, level, header, value);
+ return;
+ }
+
+ if (level == 0) {
+ if (header)
+ fprintf(outf, "{");
+ else
+ fprintf(outf, "\n}\n");
+
+ } else {
+ int j = 0;
+
+ for (i = 0; i < level; ++i)
+ j += snprintf(&delimiters[j], sizeof(delimiters) - j,
+ "%s", spaces);
+
+ if (last_level == level)
+ fprintf(outf, ",\n");
+
+ if (value) {
+ if (last_level != level)
+ fprintf(outf, "\n");
+
+ fprintf(outf, "%s\"%s\": ", delimiters, header);
+ fprintf(outf, "\"%s\"", value);
+ } else {
+ for (i = last_level - 1; i >= level; --i) {
+ int k = 0;
+
+ for (j = i; j > 0; --j)
+ k += snprintf(&delimiters[k],
+ sizeof(delimiters) - k,
+ "%s", spaces);
+ if (i == level && header)
+ fprintf(outf, "\n%s},", delimiters);
+ else
+ fprintf(outf, "\n%s}", delimiters);
+ }
+ if (abs(last_level - level) < 3)
+ fprintf(outf, "\n");
+ if (header)
+ fprintf(outf, "%s\"%s\": {", delimiters,
+ header);
+ }
+ }
+
+ last_level = level;
+}
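+
+/*
+ * Illustrative JSON shape: successive calls at levels 1..3 with headers
+ * "package-0", "die-0", "cpu-0" nest as
+ * { "package-0": { "die-0": { "cpu-0": { ... } } } }; the level argument
+ * controls the nesting depth and closing braces are emitted when the
+ * level decreases.
+ */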
+
+static void print_package_info(int cpu, FILE *outf)
+{
+ char header[256];
+
+ snprintf(header, sizeof(header), "package-%d",
+ get_physical_package_id(cpu));
+ format_and_print(outf, 1, header, NULL);
+ snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
+ format_and_print(outf, 2, header, NULL);
+ snprintf(header, sizeof(header), "cpu-%d", cpu);
+ format_and_print(outf, 3, header, NULL);
+}
+
+static void _isst_pbf_display_information(int cpu, FILE *outf, int level,
+ struct isst_pbf_info *pbf_info,
+ int disp_level)
+{
+ char header[256];
+ char value[256];
+
+ snprintf(header, sizeof(header), "speed-select-base-freq");
+ format_and_print(outf, disp_level, header, NULL);
+
+ snprintf(header, sizeof(header), "high-priority-base-frequency(KHz)");
+ snprintf(value, sizeof(value), "%d",
+ pbf_info->p1_high * DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, disp_level + 1, header, value);
+
+ snprintf(header, sizeof(header), "high-priority-cpu-mask");
+ printcpumask(sizeof(value), value, pbf_info->core_cpumask_size,
+ pbf_info->core_cpumask);
+ format_and_print(outf, disp_level + 1, header, value);
+
+ snprintf(header, sizeof(header), "low-priority-base-frequency(KHz)");
+ snprintf(value, sizeof(value), "%d",
+ pbf_info->p1_low * DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, disp_level + 1, header, value);
+
+ snprintf(header, sizeof(header), "tjunction-temperature(C)");
+ snprintf(value, sizeof(value), "%d", pbf_info->t_prochot);
+ format_and_print(outf, disp_level + 1, header, value);
+
+ snprintf(header, sizeof(header), "thermal-design-power(W)");
+ snprintf(value, sizeof(value), "%d", pbf_info->tdp);
+ format_and_print(outf, disp_level + 1, header, value);
+}
+
+static void _isst_fact_display_information(int cpu, FILE *outf, int level,
+ int fact_bucket, int fact_avx,
+ struct isst_fact_info *fact_info,
+ int base_level)
+{
+ struct isst_fact_bucket_info *bucket_info = fact_info->bucket_info;
+ char header[256];
+ char value[256];
+ int j;
+
+ snprintf(header, sizeof(header), "speed-select-turbo-freq");
+ format_and_print(outf, base_level, header, NULL);
+ for (j = 0; j < ISST_FACT_MAX_BUCKETS; ++j) {
+ if (fact_bucket != 0xff && fact_bucket != j)
+ continue;
+
+ if (!bucket_info[j].high_priority_cores_count)
+ break;
+
+ snprintf(header, sizeof(header), "bucket-%d", j);
+ format_and_print(outf, base_level + 1, header, NULL);
+
+ snprintf(header, sizeof(header), "high-priority-cores-count");
+ snprintf(value, sizeof(value), "%d",
+ bucket_info[j].high_priority_cores_count);
+ format_and_print(outf, base_level + 2, header, value);
+
+ if (fact_avx & 0x01) {
+ snprintf(header, sizeof(header),
+ "high-priority-max-frequency(KHz)");
+ snprintf(value, sizeof(value), "%d",
+ bucket_info[j].sse_trl * DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, base_level + 2, header, value);
+ }
+
+ if (fact_avx & 0x02) {
+ snprintf(header, sizeof(header),
+ "high-priority-max-avx2-frequency(KHz)");
+ snprintf(value, sizeof(value), "%d",
+ bucket_info[j].avx_trl * DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, base_level + 2, header, value);
+ }
+
+ if (fact_avx & 0x04) {
+ snprintf(header, sizeof(header),
+ "high-priority-max-avx512-frequency(KHz)");
+ snprintf(value, sizeof(value), "%d",
+ bucket_info[j].avx512_trl *
+ DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, base_level + 2, header, value);
+ }
+ }
+ snprintf(header, sizeof(header),
+ "speed-select-turbo-freq-clip-frequencies");
+ format_and_print(outf, base_level + 1, header, NULL);
+ snprintf(header, sizeof(header), "low-priority-max-frequency(KHz)");
+ snprintf(value, sizeof(value), "%d",
+ fact_info->lp_clipping_ratio_license_sse *
+ DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, base_level + 2, header, value);
+ snprintf(header, sizeof(header),
+ "low-priority-max-avx2-frequency(KHz)");
+ snprintf(value, sizeof(value), "%d",
+ fact_info->lp_clipping_ratio_license_avx2 *
+ DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, base_level + 2, header, value);
+ snprintf(header, sizeof(header),
+ "low-priority-max-avx512-frequency(KHz)");
+ snprintf(value, sizeof(value), "%d",
+ fact_info->lp_clipping_ratio_license_avx512 *
+ DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, base_level + 2, header, value);
+}
+
+void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
+ struct isst_pkg_ctdp *pkg_dev)
+{
+ char header[256];
+ char value[256];
+ int i, base_level = 1;
+
+ print_package_info(cpu, outf);
+
+ for (i = 0; i <= pkg_dev->levels; ++i) {
+ struct isst_pkg_ctdp_level_info *ctdp_level;
+ int j;
+
+ ctdp_level = &pkg_dev->ctdp_level[i];
+ if (!ctdp_level->processed)
+ continue;
+
+ snprintf(header, sizeof(header), "perf-profile-level-%d",
+ ctdp_level->level);
+ format_and_print(outf, base_level + 3, header, NULL);
+
+ snprintf(header, sizeof(header), "cpu-count");
+ j = get_cpu_count(get_physical_package_id(cpu),
+ get_physical_die_id(cpu));
+ snprintf(value, sizeof(value), "%d", j);
+ format_and_print(outf, base_level + 4, header, value);
+
+ snprintf(header, sizeof(header), "enable-cpu-mask");
+ printcpumask(sizeof(value), value,
+ ctdp_level->core_cpumask_size,
+ ctdp_level->core_cpumask);
+ format_and_print(outf, base_level + 4, header, value);
+
+ snprintf(header, sizeof(header), "thermal-design-power-ratio");
+ snprintf(value, sizeof(value), "%d", ctdp_level->tdp_ratio);
+ format_and_print(outf, base_level + 4, header, value);
+
+ snprintf(header, sizeof(header), "base-frequency(KHz)");
+ snprintf(value, sizeof(value), "%d",
+ ctdp_level->tdp_ratio * DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, base_level + 4, header, value);
+
+ snprintf(header, sizeof(header),
+ "speed-select-turbo-freq-support");
+ snprintf(value, sizeof(value), "%d", ctdp_level->fact_support);
+ format_and_print(outf, base_level + 4, header, value);
+
+ snprintf(header, sizeof(header),
+ "speed-select-base-freq-support");
+ snprintf(value, sizeof(value), "%d", ctdp_level->pbf_support);
+ format_and_print(outf, base_level + 4, header, value);
+
+ snprintf(header, sizeof(header),
+ "speed-select-base-freq-enabled");
+ snprintf(value, sizeof(value), "%d", ctdp_level->pbf_enabled);
+ format_and_print(outf, base_level + 4, header, value);
+
+ snprintf(header, sizeof(header),
+ "speed-select-turbo-freq-enabled");
+ snprintf(value, sizeof(value), "%d", ctdp_level->fact_enabled);
+ format_and_print(outf, base_level + 4, header, value);
+
+ snprintf(header, sizeof(header), "thermal-design-power(W)");
+ snprintf(value, sizeof(value), "%d", ctdp_level->pkg_tdp);
+ format_and_print(outf, base_level + 4, header, value);
+
+ snprintf(header, sizeof(header), "tjunction-max(C)");
+ snprintf(value, sizeof(value), "%d", ctdp_level->t_proc_hot);
+ format_and_print(outf, base_level + 4, header, value);
+
+ snprintf(header, sizeof(header), "turbo-ratio-limits-sse");
+ format_and_print(outf, base_level + 4, header, NULL);
+ for (j = 0; j < ISST_TRL_MAX_ACTIVE_CORES; ++j) {
+ snprintf(header, sizeof(header), "bucket-%d", j);
+ format_and_print(outf, base_level + 5, header, NULL);
+
+ snprintf(header, sizeof(header), "core-count");
+ snprintf(value, sizeof(value), "%d", j);
+ format_and_print(outf, base_level + 6, header, value);
+
+ snprintf(header, sizeof(header), "turbo-ratio");
+ snprintf(value, sizeof(value), "%d",
+ ctdp_level->trl_sse_active_cores[j]);
+ format_and_print(outf, base_level + 6, header, value);
+ }
+ snprintf(header, sizeof(header), "turbo-ratio-limits-avx");
+ format_and_print(outf, base_level + 4, header, NULL);
+ for (j = 0; j < ISST_TRL_MAX_ACTIVE_CORES; ++j) {
+ snprintf(header, sizeof(header), "bucket-%d", j);
+ format_and_print(outf, base_level + 5, header, NULL);
+
+ snprintf(header, sizeof(header), "core-count");
+ snprintf(value, sizeof(value), "%d", j);
+ format_and_print(outf, base_level + 6, header, value);
+
+ snprintf(header, sizeof(header), "turbo-ratio");
+ snprintf(value, sizeof(value), "%d",
+ ctdp_level->trl_avx_active_cores[j]);
+ format_and_print(outf, base_level + 6, header, value);
+ }
+
+ snprintf(header, sizeof(header), "turbo-ratio-limits-avx512");
+ format_and_print(outf, base_level + 4, header, NULL);
+ for (j = 0; j < ISST_TRL_MAX_ACTIVE_CORES; ++j) {
+ snprintf(header, sizeof(header), "bucket-%d", j);
+ format_and_print(outf, base_level + 5, header, NULL);
+
+ snprintf(header, sizeof(header), "core-count");
+ snprintf(value, sizeof(value), "%d", j);
+ format_and_print(outf, base_level + 6, header, value);
+
+ snprintf(header, sizeof(header), "turbo-ratio");
+ snprintf(value, sizeof(value), "%d",
+ ctdp_level->trl_avx_512_active_cores[j]);
+ format_and_print(outf, base_level + 6, header, value);
+ }
+ if (ctdp_level->pbf_support)
+ _isst_pbf_display_information(cpu, outf, i,
+ &ctdp_level->pbf_info,
+ base_level + 4);
+ if (ctdp_level->fact_support)
+ _isst_fact_display_information(cpu, outf, i, 0xff, 0xff,
+ &ctdp_level->fact_info,
+ base_level + 4);
+ }
+
+ format_and_print(outf, 1, NULL, NULL);
+}
+
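+/* Open the outermost "start" scope; isst_ctdp_display_information_end() closes it */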
+void isst_ctdp_display_information_start(FILE *outf)
+{
+ last_level = 0;
+ format_and_print(outf, 0, "start", NULL);
+}
+
+void isst_ctdp_display_information_end(FILE *outf)
+{
+ format_and_print(outf, 0, NULL, NULL);
+}
+
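+/* Public wrapper: package identification header plus the PBF details */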
+void isst_pbf_display_information(int cpu, FILE *outf, int level,
+ struct isst_pbf_info *pbf_info)
+{
+ print_packag_info(cpu, outf);
+ _isst_pbf_display_information(cpu, outf, level, pbf_info, 4);
+ format_and_print(outf, 1, NULL, NULL);
+}
+
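+/* Public wrapper: package identification header plus the FACT details */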
+void isst_fact_display_information(int cpu, FILE *outf, int level,
+ int fact_bucket, int fact_avx,
+ struct isst_fact_info *fact_info)
+{
+ print_packag_info(cpu, outf);
+ _isst_fact_display_information(cpu, outf, level, fact_bucket, fact_avx,
+ fact_info, 4);
+ format_and_print(outf, 1, NULL, NULL);
+}
+
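+/* Print one core-power (CLOS) configuration for a CPU */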
+void isst_clos_display_information(int cpu, FILE *outf, int clos,
+ struct isst_clos_config *clos_config)
+{
+ char header[256];
+ char value[256];
+
+ snprintf(header, sizeof(header), "package-%d",
+ get_physical_package_id(cpu));
+ format_and_print(outf, 1, header, NULL);
+ snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
+ format_and_print(outf, 2, header, NULL);
+ snprintf(header, sizeof(header), "cpu-%d", cpu);
+ format_and_print(outf, 3, header, NULL);
+
+ snprintf(header, sizeof(header), "core-power");
+ format_and_print(outf, 4, header, NULL);
+
+ snprintf(header, sizeof(header), "clos");
+ snprintf(value, sizeof(value), "%d", clos);
+ format_and_print(outf, 5, header, value);
+
+ snprintf(header, sizeof(header), "epp");
+ snprintf(value, sizeof(value), "%d", clos_config->epp);
+ format_and_print(outf, 5, header, value);
+
+ snprintf(header, sizeof(header), "clos-proportional-priority");
+ snprintf(value, sizeof(value), "%d", clos_config->clos_prop_prio);
+ format_and_print(outf, 5, header, value);
+
+ snprintf(header, sizeof(header), "clos-min");
+ snprintf(value, sizeof(value), "%d", clos_config->clos_min);
+ format_and_print(outf, 5, header, value);
+
+ snprintf(header, sizeof(header), "clos-max");
+ snprintf(value, sizeof(value), "%d", clos_config->clos_max);
+ format_and_print(outf, 5, header, value);
+
+ snprintf(header, sizeof(header), "clos-desired");
+ snprintf(value, sizeof(value), "%d", clos_config->clos_desired);
+ format_and_print(outf, 5, header, value);
+
+ format_and_print(outf, 1, NULL, NULL);
+}
+
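+/* Report the integer result of a feature/command pair for one CPU */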
+void isst_display_result(int cpu, FILE *outf, char *feature, char *cmd,
+ int result)
+{
+ char header[256];
+ char value[256];
+
+ snprintf(header, sizeof(header), "package-%d",
+ get_physical_package_id(cpu));
+ format_and_print(outf, 1, header, NULL);
+ snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
+ format_and_print(outf, 2, header, NULL);
+ snprintf(header, sizeof(header), "cpu-%d", cpu);
+ format_and_print(outf, 3, header, NULL);
+ snprintf(header, sizeof(header), "%s", feature);
+ format_and_print(outf, 4, header, NULL);
+ snprintf(header, sizeof(header), "%s", cmd);
+ snprintf(value, sizeof(value), "%d", result);
+ format_and_print(outf, 5, header, value);
+
+ format_and_print(outf, 1, NULL, NULL);
+}
diff --git a/tools/power/x86/intel-speed-select/isst.h b/tools/power/x86/intel-speed-select/isst.h
new file mode 100644
index 000000000000..221881761609
--- /dev/null
+++ b/tools/power/x86/intel-speed-select/isst.h
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel Speed Select -- Enumerate and control features
+ * Copyright (c) 2019 Intel Corporation.
+ */
+
+#ifndef _ISST_H_
+#define _ISST_H_
+
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sched.h>
+#include <sys/stat.h>
+#include <sys/resource.h>
+#include <getopt.h>
+#include <err.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#include <cpuid.h>
+#include <dirent.h>
+#include <errno.h>
+
+#include <stdarg.h>
+#include <sys/ioctl.h>
+
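+/* Userspace copies of the kernel's BIT()/GENMASK() helpers */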
+#define BIT(x) (1 << (x))
+#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) << (l)) & (~0ULL >> (sizeof(long long) * 8 - 1 - (h))))
+
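+/*
+ * Mailbox command/sub-command identifiers passed to
+ * isst_send_mbox_command(): CONFIG_TDP selects the config-TDP
+ * interface, CONFIG_CLOS the core-power (CLOS) interface.
+ */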
+#define CONFIG_TDP 0x7f
+#define CONFIG_TDP_GET_LEVELS_INFO 0x00
+#define CONFIG_TDP_GET_TDP_CONTROL 0x01
+#define CONFIG_TDP_SET_TDP_CONTROL 0x02
+#define CONFIG_TDP_GET_TDP_INFO 0x03
+#define CONFIG_TDP_GET_PWR_INFO 0x04
+#define CONFIG_TDP_GET_TJMAX_INFO 0x05
+#define CONFIG_TDP_GET_CORE_MASK 0x06
+#define CONFIG_TDP_GET_TURBO_LIMIT_RATIOS 0x07
+#define CONFIG_TDP_SET_LEVEL 0x08
+#define CONFIG_TDP_GET_UNCORE_P0_P1_INFO 0x09
+#define CONFIG_TDP_GET_P1_INFO 0x0a
+#define CONFIG_TDP_GET_MEM_FREQ 0x0b
+
+#define CONFIG_TDP_GET_FACT_HP_TURBO_LIMIT_NUMCORES 0x10
+#define CONFIG_TDP_GET_FACT_HP_TURBO_LIMIT_RATIOS 0x11
+#define CONFIG_TDP_GET_FACT_LP_CLIPPING_RATIO 0x12
+
+#define CONFIG_TDP_PBF_GET_CORE_MASK_INFO 0x20
+#define CONFIG_TDP_PBF_GET_P1HI_P1LO_INFO 0x21
+#define CONFIG_TDP_PBF_GET_TJ_MAX_INFO 0x22
+#define CONFIG_TDP_PBF_GET_TDP_INFO 0x23
+
+#define CONFIG_CLOS 0xd0
+#define CLOS_PQR_ASSOC 0x00
+#define CLOS_PM_CLOS 0x01
+#define CLOS_PM_QOS_CONFIG 0x02
+#define CLOS_STATUS 0x03
+
+#define MBOX_CMD_WRITE_BIT 0x08
+
+#define PM_QOS_INFO_OFFSET 0x00
+#define PM_QOS_CONFIG_OFFSET 0x04
+#define PM_CLOS_OFFSET 0x08
+#define PQR_ASSOC_OFFSET 0x20
+
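+/* One class-of-service (CLOS) core-power configuration */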
+struct isst_clos_config {
+ int pkg_id;
+ int die_id;
+ unsigned char epp;
+ unsigned char clos_prop_prio;
+ unsigned char clos_min;
+ unsigned char clos_max;
+ unsigned char clos_desired;
+};
+
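+/* Per-bucket turbo ratio limits for speed-select-turbo-freq (FACT) */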
+struct isst_fact_bucket_info {
+ int high_priority_cores_count;
+ int sse_trl;
+ int avx_trl;
+ int avx512_trl;
+};
+
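+/* Speed-select-base-freq (PBF) properties of one config-TDP level */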
+struct isst_pbf_info {
+ int pbf_acticated;
+ int pbf_available;
+ size_t core_cpumask_size;
+ cpu_set_t *core_cpumask;
+ int p1_high;
+ int p1_low;
+ int t_control;
+ int t_prochot;
+ int tdp;
+};
+
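+/* FACT enumeration: low-priority clipping ratios plus per-bucket limits */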
+#define ISST_TRL_MAX_ACTIVE_CORES 8
+#define ISST_FACT_MAX_BUCKETS 8
+struct isst_fact_info {
+ int lp_clipping_ratio_license_sse;
+ int lp_clipping_ratio_license_avx2;
+ int lp_clipping_ratio_license_avx512;
+ struct isst_fact_bucket_info bucket_info[ISST_FACT_MAX_BUCKETS];
+};
+
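+/* Everything enumerated about a single config-TDP level */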
+struct isst_pkg_ctdp_level_info {
+ int processed;
+ int control_cpu;
+ int pkg_id;
+ int die_id;
+ int level;
+ int fact_support;
+ int pbf_support;
+ int fact_enabled;
+ int pbf_enabled;
+ int tdp_ratio;
+ int active;
+ int tdp_control;
+ int pkg_tdp;
+ int pkg_min_power;
+ int pkg_max_power;
+ int fact;
+ int t_proc_hot;
+ int uncore_p0;
+ int uncore_p1;
+ int sse_p1;
+ int avx2_p1;
+ int avx512_p1;
+ int mem_freq;
+ size_t core_cpumask_size;
+ cpu_set_t *core_cpumask;
+ int cpu_count;
+ int trl_sse_active_cores[ISST_TRL_MAX_ACTIVE_CORES];
+ int trl_avx_active_cores[ISST_TRL_MAX_ACTIVE_CORES];
+ int trl_avx_512_active_cores[ISST_TRL_MAX_ACTIVE_CORES];
+ int kobj_bucket_index;
+ int active_bucket;
+ int fact_max_index;
+ int fact_max_config;
+ int pbf_found;
+ int pbf_active;
+ struct isst_pbf_info pbf_info;
+ struct isst_fact_info fact_info;
+};
+
+#define ISST_MAX_TDP_LEVELS (4 + 1) /* +1 for base config */
+struct isst_pkg_ctdp {
+ int locked;
+ int version;
+ int processed;
+ int levels;
+ int current_level;
+ int enabled;
+ struct isst_pkg_ctdp_level_info ctdp_level[ISST_MAX_TDP_LEVELS];
+};
+
+extern int get_topo_max_cpus(void);
+extern int get_cpu_count(int pkg_id, int die_id);
+
+/* Common interfaces */
+extern void debug_printf(const char *format, ...);
+extern int out_format_is_json(void);
+extern int get_physical_package_id(int cpu);
+extern int get_physical_die_id(int cpu);
+extern size_t alloc_cpu_set(cpu_set_t **cpu_set);
+extern void free_cpu_set(cpu_set_t *cpu_set);
+extern int find_logical_cpu(int pkg_id, int die_id, int phy_cpu);
+extern int find_phy_cpu_num(int logical_cpu);
+extern int find_phy_core_num(int logical_cpu);
+extern void set_cpu_mask_from_punit_coremask(int cpu,
+ unsigned long long core_mask,
+ size_t core_cpumask_size,
+ cpu_set_t *core_cpumask,
+ int *cpu_cnt);
+
+extern int isst_send_mbox_command(unsigned int cpu, unsigned char command,
+ unsigned char sub_command,
+ unsigned int write,
+ unsigned int req_data, unsigned int *resp);
+
+extern int isst_send_msr_command(unsigned int cpu, unsigned int command,
+ int write, unsigned long long *req_resp);
+
+extern int isst_get_ctdp_levels(int cpu, struct isst_pkg_ctdp *pkg_dev);
+extern int isst_get_process_ctdp(int cpu, int tdp_level,
+ struct isst_pkg_ctdp *pkg_dev);
+extern void isst_get_process_ctdp_complete(int cpu,
+ struct isst_pkg_ctdp *pkg_dev);
+extern void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
+ struct isst_pkg_ctdp *pkg_dev);
+extern void isst_ctdp_display_information_start(FILE *outf);
+extern void isst_ctdp_display_information_end(FILE *outf);
+extern void isst_pbf_display_information(int cpu, FILE *outf, int level,
+ struct isst_pbf_info *info);
+extern int isst_set_tdp_level(int cpu, int tdp_level);
+extern int isst_set_tdp_level_msr(int cpu, int tdp_level);
+extern int isst_set_pbf_fact_status(int cpu, int pbf, int enable);
+extern int isst_get_pbf_info(int cpu, int level,
+ struct isst_pbf_info *pbf_info);
+extern void isst_get_pbf_info_complete(struct isst_pbf_info *pbf_info);
+extern int isst_get_fact_info(int cpu, int level,
+ struct isst_fact_info *fact_info);
+extern int isst_get_fact_bucket_info(int cpu, int level,
+ struct isst_fact_bucket_info *bucket_info);
+extern void isst_fact_display_information(int cpu, FILE *outf, int level,
+ int fact_bucket, int fact_avx,
+ struct isst_fact_info *fact_info);
+extern int isst_set_trl(int cpu, unsigned long long trl);
+extern int isst_set_trl_from_current_tdp(int cpu, unsigned long long trl);
+extern int isst_get_config_tdp_lock_status(int cpu);
+
+extern int isst_pm_qos_config(int cpu, int enable_clos, int priority_type);
+extern int isst_pm_get_clos(int cpu, int clos,
+ struct isst_clos_config *clos_config);
+extern int isst_set_clos(int cpu, int clos,
+ struct isst_clos_config *clos_config);
+extern int isst_clos_associate(int cpu, int clos);
+extern int isst_clos_get_assoc_status(int cpu, int *clos_id);
+extern void isst_clos_display_information(int cpu, FILE *outf, int clos,
+ struct isst_clos_config *clos_config);
+
+extern int isst_read_reg(unsigned short reg, unsigned int *val);
+extern int isst_write_reg(int reg, unsigned int val);
+
+extern void isst_display_result(int cpu, FILE *outf, char *feature, char *cmd,
+ int result);
+#endif
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index b0cc1a34db27..ac1568a6fac0 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -50,3 +50,7 @@ config KVM_COMPAT
config HAVE_KVM_IRQ_BYPASS
bool
+
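+# Selected by architectures that provide kvm_arch_no_poll() to skip halt polling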
+config HAVE_KVM_NO_POLL
+ bool
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 81e7f4e5f33c..3b63d067b393 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2221,7 +2221,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
u64 block_ns;
start = cur = ktime_get();
- if (vcpu->halt_poll_ns) {
+ if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
++vcpu->stat.halt_attempted_poll;