author    Johannes Thumshirn <jthumshirn@suse.de>  2018-07-04 09:17:31 +0200
committer Johannes Thumshirn <jthumshirn@suse.de>  2018-07-04 09:17:31 +0200
commit    25ac2d1344b4ed8e4c9a8505b2b678e5e0acba66 (patch)
tree      7fca87cb5bd7c3fe642e2012da547e03d4b293bc
parent    ad5cbcc86e17312fdd92924525ce06ab602a07f2 (diff)
parent    2f37da154670922aeec4cd1327c38d620a66c9e1 (diff)
Conflicts:
	series.conf

suse-commit: b28389e19fc1d717d555f0eb46a7dbcfb479c144
-rw-r--r--  Documentation/sysctl/fs.txt | 36
-rw-r--r--  arch/x86/include/asm/barrier.h | 2
-rw-r--r--  arch/x86/kernel/stacktrace.c | 13
-rw-r--r--  arch/x86/kvm/vmx.c | 15
-rw-r--r--  drivers/acpi/acpi_watchdog.c | 2
-rw-r--r--  drivers/base/core.c | 14
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 179
-rw-r--r--  drivers/firmware/dell_rbu.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 49
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 7
-rw-r--r--  drivers/hid/Kconfig | 7
-rw-r--r--  drivers/hid/hid-ids.h | 8
-rw-r--r--  drivers/hid/hid-lenovo.c | 36
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/pci-ish.c | 22
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/bus.c | 2
-rw-r--r--  drivers/hid/wacom_sys.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-pmcmsp.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-viperboard.c | 2
-rw-r--r--  drivers/iio/adc/ad7793.c | 75
-rw-r--r--  drivers/iio/buffer/kfifo_buf.c | 7
-rw-r--r--  drivers/input/joystick/xpad.c | 2
-rw-r--r--  drivers/media/usb/uvc/uvc_video.c | 24
-rw-r--r--  drivers/mmc/host/sdhci.c | 2
-rw-r--r--  drivers/net/can/spi/hi311x.c | 11
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_rockchip.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 15
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 29
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 18
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 19
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/phy/phy.c | 175
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 40
-rw-r--r--  drivers/net/ppp/pppoe.c | 11
-rw-r--r--  drivers/net/team/team.c | 4
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/vrf.c | 5
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/paging.c | 49
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 10
-rw-r--r--  drivers/nvme/host/core.c | 55
-rw-r--r--  drivers/nvme/host/fabrics.c | 75
-rw-r--r--  drivers/nvme/host/fabrics.h | 41
-rw-r--r--  drivers/nvme/host/fc.c | 160
-rw-r--r--  drivers/nvme/host/multipath.c | 22
-rw-r--r--  drivers/nvme/host/nvme.h | 13
-rw-r--r--  drivers/nvme/host/rdma.c | 17
-rw-r--r--  drivers/nvme/target/fc.c | 2
-rw-r--r--  drivers/nvme/target/loop.c | 20
-rw-r--r--  drivers/pci/pci-driver.c | 17
-rw-r--r--  drivers/pci/quirks.c | 7
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 4
-rw-r--r--  drivers/regulator/of_regulator.c | 13
-rw-r--r--  drivers/regulator/twl-regulator.c | 2
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 49
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 2
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 13
-rw-r--r--  drivers/soc/fsl/qbman/qman.c | 28
-rw-r--r--  drivers/tty/serial/8250/8250_omap.c | 16
-rw-r--r--  drivers/tty/serial/altera_uart.c | 12
-rw-r--r--  drivers/tty/serial/arc_uart.c | 5
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 4
-rw-r--r--  drivers/tty/serial/imx.c | 6
-rw-r--r--  drivers/tty/serial/mxs-auart.c | 4
-rw-r--r--  drivers/tty/serial/samsung.c | 11
-rw-r--r--  drivers/tty/serial/sh-sci.c | 8
-rw-r--r--  drivers/tty/serial/xilinx_uartps.c | 2
-rw-r--r--  drivers/vhost/net.c | 1
-rw-r--r--  drivers/vhost/vhost.c | 3
-rw-r--r--  drivers/w1/masters/mxc_w1.c | 20
-rw-r--r--  fs/binfmt_misc.c | 12
-rw-r--r--  fs/ceph/inode.c | 1
-rw-r--r--  fs/cifs/cifssmb.c | 12
-rw-r--r--  fs/cifs/connect.c | 3
-rw-r--r--  fs/cifs/smb2pdu.c | 12
-rw-r--r--  fs/nfs/callback_proc.c | 7
-rw-r--r--  fs/nfs/file.c | 18
-rw-r--r--  fs/nfs/nfs4proc.c | 14
-rw-r--r--  fs/proc/fd.c | 13
-rw-r--r--  include/linux/cgroup-defs.h | 4
-rw-r--r--  include/linux/phy.h | 1
-rw-r--r--  include/linux/proc_fs.h | 2
-rw-r--r--  include/linux/regulator/consumer.h | 1
-rw-r--r--  include/net/arp.h | 3
-rw-r--r--  include/net/net_namespace.h | 10
-rw-r--r--  include/net/sch_generic.h | 19
-rw-r--r--  kernel/locking/qspinlock.c | 25
-rw-r--r--  kernel/sysctl.c | 9
-rw-r--r--  lib/kobject.c | 46
-rw-r--r--  net/core/dev.c | 41
-rw-r--r--  net/core/devlink.c | 16
-rw-r--r--  net/core/flow_dissector.c | 3
-rw-r--r--  net/core/neighbour.c | 4
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/core/sock_reuseport.c | 35
-rw-r--r--  net/dccp/ccids/ccid2.c | 3
-rw-r--r--  net/ieee802154/6lowpan/core.c | 12
-rw-r--r--  net/ipv4/arp.c | 7
-rw-r--r--  net/ipv4/fib_semantics.c | 5
-rw-r--r--  net/ipv4/inet_fragment.c | 3
-rw-r--r--  net/ipv4/ip_sockglue.c | 6
-rw-r--r--  net/ipv4/route.c | 7
-rw-r--r--  net/ipv4/tcp.c | 9
-rw-r--r--  net/ipv4/tcp_input.c | 30
-rw-r--r--  net/ipv4/tcp_timer.c | 15
-rw-r--r--  net/ipv6/datagram.c | 21
-rw-r--r--  net/ipv6/ip6_gre.c | 14
-rw-r--r--  net/ipv6/ip6_output.c | 6
-rw-r--r--  net/ipv6/ip6mr.c | 4
-rw-r--r--  net/ipv6/ndisc.c | 3
-rw-r--r--  net/ipv6/seg6_iptunnel.c | 7
-rw-r--r--  net/ipv6/sit.c | 2
-rw-r--r--  net/iucv/af_iucv.c | 4
-rw-r--r--  net/kcm/kcmsock.c | 24
-rw-r--r--  net/netlink/af_netlink.c | 3
-rw-r--r--  net/netlink/genetlink.c | 12
-rw-r--r--  net/rxrpc/output.c | 2
-rw-r--r--  net/sched/cls_api.c | 7
-rw-r--r--  net/sched/cls_u32.c | 3
-rw-r--r--  net/sched/sch_netem.c | 2
-rw-r--r--  net/sctp/ipv6.c | 10
-rw-r--r--  net/sunrpc/xprtrdma/fmr_ops.c | 5
-rw-r--r--  net/sunrpc/xprtrdma/frwr_ops.c | 9
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c | 2
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c | 5
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h | 2
-rw-r--r--  net/tipc/node.c | 26
-rw-r--r--  scripts/kconfig/confdata.c | 2
-rw-r--r--  scripts/kconfig/expr.c | 2
-rw-r--r--  scripts/kconfig/menu.c | 1
-rw-r--r--  scripts/kconfig/zconf.y | 33
-rw-r--r--  sound/pci/hda/patch_ca0132.c | 72
-rw-r--r--  sound/pci/hda/patch_realtek.c | 20
142 files changed, 1517 insertions(+), 880 deletions(-)
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 35e17f748ca7..01274048df7f 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -272,6 +272,42 @@ in a mount namespace.
==============================================================
+procfs-drop-fd-dentries:
+
+* SUSE-specific; this option may be removed in a future release.
+
+This option controls when the proc files representing a task's
+open files are removed. It applies to the following directories:
+- /proc/pid/fd
+- /proc/pid/fdinfo
+- /proc/pid/task/*/fd
+- /proc/pid/task/*/fdinfo
+
+By default, dentries belonging to tasks that are still running
+will be retained and those belonging to exited tasks will be
+dropped immediately.
+
+This policy ensures that memory is not wasted, but can run into
+scalability issues on very large systems when a task with thousands
+of threads and many open files exits. When many tasks exit
+simultaneously, substantial contention on the global inode spinlock
+may result in suboptimal performance of the system until the inodes
+are released.
+
+When set to "0" (default), the policy is to retain dentries for running
+tasks and to immediately delete dentries of tasks which have exited. Once
+the dentry is released, the inode will be freed immediately.
+
+When set to "1", the policy is to delete the dentries immediately after
+the last reference is dropped. Once the dentry is released, the inode
+will be freed immediately. This ensures that the thread which created
+the inodes will also clean them up, eliminating much of the lock
+contention. The tradeoff is that frequent use of fd/fdinfo will be
+slower as these files will need to be recreated each time they
+are accessed.
+
+==============================================================
+
2. /proc/sys/fs/binfmt_misc
----------------------------------------------------------
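For reference, a minimal userspace sketch of flipping the knob documented above (path taken from the new text itself; error handling kept short):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Switch /proc/<pid>/fd dentry handling to "drop immediately". */
	int main(void)
	{
		int fd = open("/proc/sys/fs/procfs-drop-fd-dentries", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}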
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index fecdcb8d461c..9c358dc11b41 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -37,7 +37,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
{
unsigned long mask;
- asm ("cmp %1,%2; sbb %0,%0;"
+ asm volatile ("cmp %1,%2; sbb %0,%0;"
:"=r" (mask)
:"g"(size),"r" (index)
:"cc");
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 32983bf642d1..7627455047c2 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -81,16 +81,6 @@ EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
-#define STACKTRACE_DUMP_ONCE(task) ({ \
- static bool __section(.data.unlikely) __dumped; \
- \
- if (!__dumped) { \
- __dumped = true; \
- WARN_ON(1); \
- show_stack(task, NULL); \
- } \
-})
-
static int __always_inline
__save_stack_trace_reliable(struct stack_trace *trace,
struct task_struct *task)
@@ -99,7 +89,8 @@ __save_stack_trace_reliable(struct stack_trace *trace,
struct pt_regs *regs;
unsigned long addr;
- for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
+ for (unwind_start(&state, task, NULL, NULL);
+ !unwind_done(&state) && !unwind_error(&state);
unwind_next_frame(&state)) {
regs = unwind_get_entry_regs(&state, NULL);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 349d0319f0cd..ca551bd3c964 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7401,6 +7401,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}
+ /* CPL=0 must be checked manually. */
+ if (vmx_get_cpl(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
if (vmx->nested.vmxon) {
nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
return kvm_skip_emulated_instruction(vcpu);
@@ -7460,6 +7466,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
*/
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
+ if (vmx_get_cpl(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 0;
+ }
+
if (!to_vmx(vcpu)->nested.vmxon) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 0;
@@ -7793,7 +7804,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, true, &gva))
return 1;
- /* _system ok, as hardware has verified cpl=0 */
+ /* _system ok, nested_vmx_check_permission has verified cpl=0 */
kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
&field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
}
@@ -7936,7 +7947,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, true, &vmcs_gva))
return 1;
- /* ok to use *_system, as hardware has verified cpl=0 */
+ /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
(void *)&to_vmx(vcpu)->nested.current_vmptr,
sizeof(u64), &e)) {
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 6f4219d7bb74..972877f1f489 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -105,7 +105,7 @@ void __init acpi_watchdog_init(void)
for (i = 0; i < wdat->entries; i++) {
const struct acpi_generic_address *gas;
struct resource_entry *rentry;
- struct resource res;
+ struct resource res = {};
bool found;
gas = &entries[i].register_region;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 535040662615..032d92127adb 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1332,7 +1332,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
dir = kzalloc(sizeof(*dir), GFP_KERNEL);
if (!dir)
- return NULL;
+ return ERR_PTR(-ENOMEM);
dir->class = class;
kobject_init(&dir->kobj, &class_dir_ktype);
@@ -1342,7 +1342,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
if (retval < 0) {
kobject_put(&dir->kobj);
- return NULL;
+ return ERR_PTR(retval);
}
return &dir->kobj;
}
@@ -1649,6 +1649,10 @@ int device_add(struct device *dev)
parent = get_device(dev->parent);
kobj = get_device_parent(dev, parent);
+ if (IS_ERR(kobj)) {
+ error = PTR_ERR(kobj);
+ goto parent_error;
+ }
if (kobj)
dev->kobj.parent = kobj;
@@ -1747,6 +1751,7 @@ done:
kobject_del(&dev->kobj);
Error:
cleanup_glue_dir(dev, glue_dir);
+parent_error:
put_device(parent);
name_error:
kfree(dev->p);
@@ -2566,6 +2571,11 @@ int device_move(struct device *dev, struct device *new_parent,
device_pm_lock();
new_parent = get_device(new_parent);
new_parent_kobj = get_device_parent(dev, new_parent);
+ if (IS_ERR(new_parent_kobj)) {
+ error = PTR_ERR(new_parent_kobj);
+ put_device(new_parent);
+ goto out;
+ }
pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
__func__, new_parent ? dev_name(new_parent) : "<NULL>");
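These hunks switch get_device_parent()/class_dir_create_and_add() from returning NULL on any failure to the usual ERR_PTR convention, so callers can distinguish a real error from "no glue directory needed". A condensed sketch of what callers must now do (the wrapper function is hypothetical; the checks mirror the device_add() hunk):

	static int example_set_parent(struct device *dev, struct device *parent)
	{
		struct kobject *kobj = get_device_parent(dev, parent);

		if (IS_ERR(kobj))
			return PTR_ERR(kobj);	/* e.g. -ENOMEM from the glue dir */
		if (kobj)
			dev->kobj.parent = kobj; /* NULL still means "no glue dir" */
		return 0;
	}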
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 99e477910679..e7a6d9c615c8 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -249,6 +249,11 @@ struct global_params {
* preference/bias
* @epp_saved: Saved EPP/EPB during system suspend or CPU offline
* operation
+ * @hwp_req_cached: Cached value of the last HWP Request MSR
+ * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
+ * @last_io_update: Last time when IO wake flag was set
+ * @sched_flags: Store scheduler flags for possible cross CPU update
+ * @hwp_boost_min: Last HWP boosted min performance
*
* This structure stores per CPU instance data for all CPUs.
*/
@@ -283,6 +288,11 @@ struct cpudata {
s16 epp_policy;
s16 epp_default;
s16 epp_saved;
+ u64 hwp_req_cached;
+ u64 hwp_cap_cached;
+ u64 last_io_update;
+ unsigned int sched_flags;
+ u32 hwp_boost_min;
};
static struct cpudata **all_cpu_data;
@@ -349,6 +359,7 @@ static struct pstate_adjust_policy pid_params __read_mostly = {
static int hwp_active __read_mostly;
static bool per_cpu_limits __read_mostly;
+static bool hwp_boost __read_mostly;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
@@ -803,6 +814,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
u64 cap;
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+ WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
if (global.no_turbo)
*current_max = HWP_GUARANTEED_PERF(cap);
else
@@ -877,6 +889,7 @@ update_epp:
intel_pstate_set_epb(cpu, epp);
}
skip_epp:
+ WRITE_ONCE(cpu_data->hwp_req_cached, value);
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
@@ -1205,6 +1218,30 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
return count;
}
+static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", hwp_boost);
+}
+
+static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &input);
+ if (ret)
+ return ret;
+
+ mutex_lock(&intel_pstate_driver_lock);
+ hwp_boost = !!input;
+ intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_driver_lock);
+
+ return count;
+}
+
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);
@@ -1214,6 +1251,7 @@ define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
+define_one_global_rw(hwp_dynamic_boost);
static struct attribute *intel_pstate_attributes[] = {
&status.attr,
@@ -1254,6 +1292,11 @@ static void __init intel_pstate_sysfs_expose_params(void)
rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
WARN_ON(rc);
+ if (hwp_active) {
+ rc = sysfs_create_file(intel_pstate_kobject,
+ &hwp_dynamic_boost.attr);
+ WARN_ON(rc);
+ }
}
/************************** sysfs end ************************/
@@ -1566,6 +1609,116 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
intel_pstate_set_min_pstate(cpu);
}
+/*
+ * A long hold time will keep high perf limits in place for a long
+ * time, which negatively impacts perf/watt for some workloads, like
+ * specpower. 3ms is based on experiments on some workloads.
+ */
+static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
+
+static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
+{
+ u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
+ u32 max_limit = (hwp_req & 0xff00) >> 8;
+ u32 min_limit = (hwp_req & 0xff);
+ u32 boost_level1;
+
+ /*
+ * Cases to consider (User changes via sysfs or boot time):
+ * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
+ * No boost, return.
+ * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
+ * Should result in one level boost only for P0.
+ * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
+ * Should result in two level boost:
+ * (min + p1)/2 and P1.
+ * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
+ * Should result in three level boost:
+ * (min + p1)/2, P1 and P0.
+ */
+
+ /* If max and min are equal or already at max, nothing to boost */
+ if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
+ return;
+
+ if (!cpu->hwp_boost_min)
+ cpu->hwp_boost_min = min_limit;
+
+ /* level at the halfway mark between min and guaranteed */
+ boost_level1 = (HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) + min_limit) >> 1;
+
+ if (cpu->hwp_boost_min < boost_level1)
+ cpu->hwp_boost_min = boost_level1;
+ else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
+ cpu->hwp_boost_min = HWP_GUARANTEED_PERF(cpu->hwp_cap_cached);
+ else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) &&
+ max_limit != HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
+ cpu->hwp_boost_min = max_limit;
+ else
+ return;
+
+ hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
+ wrmsrl(MSR_HWP_REQUEST, hwp_req);
+ cpu->last_update = cpu->sample.time;
+}
+
+static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
+{
+ if (cpu->hwp_boost_min) {
+ bool expired;
+
+ /* Check if we are idle for hold time to boost down */
+ expired = time_after64(cpu->sample.time, cpu->last_update +
+ hwp_boost_hold_time_ns);
+ if (expired) {
+ wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
+ cpu->hwp_boost_min = 0;
+ }
+ }
+ cpu->last_update = cpu->sample.time;
+}
+
+static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
+ u64 time)
+{
+ cpu->sample.time = time;
+
+ if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
+ bool do_io = false;
+
+ cpu->sched_flags = 0;
+ /*
+ * Set iowait_boost flag and update time. Since the IO WAIT flag
+ * is set all the time, we can't conclude from one occurrence
+ * alone that IO-bound activity is scheduled on this CPU. Only
+ * if we receive at least two in two consecutive ticks do we
+ * treat the CPU as a boost candidate.
+ */
+ if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
+ do_io = true;
+
+ cpu->last_io_update = time;
+
+ if (do_io)
+ intel_pstate_hwp_boost_up(cpu);
+
+ } else {
+ intel_pstate_hwp_boost_down(cpu);
+ }
+}
+
+static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
+ u64 time, unsigned int flags)
+{
+ struct cpudata *cpu = container_of(data, struct cpudata, update_util);
+
+ cpu->sched_flags |= flags;
+
+ if (smp_processor_id() == cpu->cpu)
+ intel_pstate_update_util_hwp_local(cpu, time);
+}
+
static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
@@ -1913,6 +2066,12 @@ static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
static bool pid_in_use(void);
+static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
+ ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
+ ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs),
+ {}
+};
+
static int intel_pstate_init_cpu(unsigned int cpunum)
{
struct cpudata *cpu;
@@ -1943,6 +2102,10 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_disable_ee(cpunum);
intel_pstate_hwp_enable(cpu);
+
+ id = x86_match_cpu(intel_pstate_hwp_boost_ids);
+ if (id)
+ hwp_boost = true;
} else if (pid_in_use()) {
intel_pstate_pid_reset(cpu);
}
@@ -1958,7 +2121,7 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
struct cpudata *cpu = all_cpu_data[cpu_num];
- if (hwp_active)
+ if (hwp_active && !hwp_boost)
return;
if (cpu->update_util_set)
@@ -1967,7 +2130,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
/* Prevent intel_pstate_update_util() from using stale data. */
cpu->sample.time = 0;
cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
- pstate_funcs.update_util);
+ (hwp_active ?
+ intel_pstate_update_util_hwp :
+ intel_pstate_update_util));
cpu->update_util_set = true;
}
@@ -2079,8 +2244,16 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_set_update_util_hook(policy->cpu);
}
- if (hwp_active)
+ if (hwp_active) {
+ /*
+ * If hwp_boost was active before and has since been
+ * dynamically turned off, we need to clear the
+ * update util hook.
+ */
+ if (!hwp_boost)
+ intel_pstate_clear_update_util_hook(policy->cpu);
intel_pstate_hwp_set(policy->cpu);
+ }
mutex_unlock(&intel_pstate_limits_lock);
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 2f452f1f7c8a..53f27a6e2d76 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -45,6 +45,7 @@
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/dma-mapping.h>
+#include <asm/set_memory.h>
MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>");
MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems");
@@ -181,6 +182,11 @@ static int create_packet(void *data, size_t length)
packet_data_temp_buf = NULL;
}
}
+ /*
+ * Set to uncacheable, or it may never get written back before reboot
+ */
+ set_memory_uc((unsigned long)packet_data_temp_buf, 1 << ordernum);
+
spin_lock(&rbu_data.lock);
newpacket->data = packet_data_temp_buf;
@@ -349,6 +355,8 @@ static void packet_empty_list(void)
* to make sure there are no stale RBU packets left in memory
*/
memset(newpacket->data, 0, rbu_data.packetsize);
+ set_memory_wb((unsigned long)newpacket->data,
+ 1 << newpacket->ordernum);
free_pages((unsigned long) newpacket->data,
newpacket->ordernum);
kfree(newpacket);
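Taken together, the two dell_rbu hunks bracket the packet buffer's lifetime with a cache-attribute change. A condensed sketch of the resulting lifecycle (function name hypothetical; page-order handling as in the driver):

	static int rbu_packet_buffer_example(unsigned int order)
	{
		unsigned long buf = __get_free_pages(GFP_KERNEL, order);

		if (!buf)
			return -ENOMEM;
		/* Uncacheable, so the data is in RAM (not stuck in cache)
		 * when the BIOS reads it across the reboot. */
		set_memory_uc(buf, 1 << order);
		/* ... fill the packet and queue it for the BIOS update ... */
		/* Restore write-back before handing the pages back. */
		set_memory_wb(buf, 1 << order);
		free_pages(buf, order);
		return 0;
	}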
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 74a0aa537b5a..b6f74bd0f5cf 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -460,7 +460,9 @@ eb_validate_vma(struct i915_execbuffer *eb,
}
static int
-eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
+eb_add_vma(struct i915_execbuffer *eb,
+ unsigned int i, unsigned batch_idx,
+ struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
int err;
@@ -493,6 +495,24 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
eb->flags[i] = entry->flags;
vma->exec_flags = &eb->flags[i];
+ /*
+ * SNA is doing fancy tricks with compressing batch buffers, which leads
+ * to negative relocation deltas. Usually that works out ok since the
+ * relocate address is still positive, except when the batch is placed
+ * very low in the GTT. Ensure this doesn't happen.
+ *
+ * Note that actual hangs have only been observed on gen7, but for
+ * paranoia do it everywhere.
+ */
+ if (i == batch_idx) {
+ if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+ eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+ if (eb->reloc_cache.has_fence)
+ eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+
+ eb->batch = vma;
+ }
+
err = 0;
if (eb_pin_vma(eb, entry, vma)) {
if (entry->offset != vma->node.start) {
@@ -687,7 +707,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
struct drm_i915_gem_object *obj;
- unsigned int i;
+ unsigned int i, batch;
int err;
if (unlikely(i915_gem_context_is_closed(eb->ctx)))
@@ -699,6 +719,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
INIT_LIST_HEAD(&eb->relocs);
INIT_LIST_HEAD(&eb->unbound);
+ batch = eb_batch_index(eb);
+
for (i = 0; i < eb->buffer_count; i++) {
u32 handle = eb->exec[i].handle;
struct i915_lut_handle *lut;
@@ -740,33 +762,16 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
lut->handle = handle;
add_vma:
- err = eb_add_vma(eb, i, vma);
+ err = eb_add_vma(eb, i, batch, vma);
if (unlikely(err))
goto err_vma;
GEM_BUG_ON(vma != eb->vma[i]);
GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
+ eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
}
- /* take note of the batch buffer before we might reorder the lists */
- i = eb_batch_index(eb);
- eb->batch = eb->vma[i];
- GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);
-
- /*
- * SNA is doing fancy tricks with compressing batch buffers, which leads
- * to negative relocation deltas. Usually that works out ok since the
- * relocate address is still positive, except when the batch is placed
- * very low in the GTT. Ensure this doesn't happen.
- *
- * Note that actual hangs have only been observed on gen7, but for
- * paranoia do it everywhere.
- */
- if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
- eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
- if (eb->reloc_cache.has_fence)
- eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
-
eb->args->flags |= __EXEC_VALIDATED;
return eb_reserve(eb);
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 34e4c8104bee..88cf2039e2b0 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -634,7 +634,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
struct qxl_cursor_cmd *cmd;
struct qxl_cursor *cursor;
struct drm_gem_object *obj;
- struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
+ struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
int ret;
void *user_ptr;
int size = 64*64*4;
@@ -688,7 +688,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
cursor_bo, 0);
cmd->type = QXL_CURSOR_SET;
- qxl_bo_unref(&qcrtc->cursor_bo);
+ old_cursor_bo = qcrtc->cursor_bo;
qcrtc->cursor_bo = cursor_bo;
cursor_bo = NULL;
} else {
@@ -708,6 +708,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ if (old_cursor_bo)
+ qxl_bo_unref(&old_cursor_bo);
+
qxl_bo_unref(&cursor_bo);
return;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 1aefdf4a1ccc..ef4ea41f41fc 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -429,10 +429,11 @@ config HID_LENOVO
select NEW_LEDS
select LEDS_CLASS
---help---
- Support for Lenovo devices that are not fully compliant with HID standard.
+ Support for IBM/Lenovo devices that are not fully compliant with HID standard.
- Say Y if you want support for the non-compliant features of the Lenovo
- Thinkpad standalone keyboards, e.g:
+ Say Y if you want support for horizontal scrolling of the IBM/Lenovo
+ Scrollpoint mice or the non-compliant features of the Lenovo Thinkpad
+ standalone keyboards, e.g:
- ThinkPad USB Keyboard with TrackPoint (supports extra LEDs and trackpoint
configuration)
- ThinkPad Compact Bluetooth Keyboard with TrackPoint (supports Fn keys)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b833676e4727..333b098f4b99 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -524,6 +524,13 @@
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
+#define USB_VENDOR_ID_IBM 0x04b3
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_III 0x3100
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_PRO 0x3103
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL 0x3105
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL 0x3108
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO 0x3109
+
#define USB_VENDOR_ID_IDEACOM 0x1cb6
#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650
#define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651
@@ -651,6 +658,7 @@
#define USB_DEVICE_ID_LENOVO_TPKBD 0x6009
#define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047
#define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048
+#define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL 0x6049
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 1ac4ff4d57a6..643b6eb54442 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -6,6 +6,17 @@
*
* Copyright (c) 2012 Bernhard Seibold
* Copyright (c) 2014 Jamie Lentin <jm@lentin.co.uk>
+ *
+ * Linux IBM/Lenovo Scrollpoint mouse driver:
+ * - IBM Scrollpoint III
+ * - IBM Scrollpoint Pro
+ * - IBM Scrollpoint Optical
+ * - IBM Scrollpoint Optical 800dpi
+ * - IBM Scrollpoint Optical 800dpi Pro
+ * - Lenovo Scrollpoint Optical
+ *
+ * Copyright (c) 2012 Peter De Wachter <pdewacht@gmail.com>
+ * Copyright (c) 2018 Peter Ganzhorn <peter.ganzhorn@gmail.com>
*/
/*
@@ -160,6 +171,17 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
return 0;
}
+static int lenovo_input_mapping_scrollpoint(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ if (usage->hid == HID_GD_Z) {
+ hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL);
+ return 1;
+ }
+ return 0;
+}
+
static int lenovo_input_mapping(struct hid_device *hdev,
struct hid_input *hi, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max)
@@ -172,6 +194,14 @@ static int lenovo_input_mapping(struct hid_device *hdev,
case USB_DEVICE_ID_LENOVO_CBTKBD:
return lenovo_input_mapping_cptkbd(hdev, hi, field,
usage, bit, max);
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_III:
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_PRO:
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL:
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL:
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO:
+ case USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL:
+ return lenovo_input_mapping_scrollpoint(hdev, hi, field,
+ usage, bit, max);
default:
return 0;
}
@@ -883,6 +913,12 @@ static const struct hid_device_id lenovo_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_III) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) },
{ }
};
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 20d824f74f99..90d7be08fea0 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -204,8 +204,7 @@ static void ish_remove(struct pci_dev *pdev)
kfree(ishtp_dev);
}
-#ifdef CONFIG_PM
-static struct device *ish_resume_device;
+static struct device __maybe_unused *ish_resume_device;
/* 50ms to get resume response */
#define WAIT_FOR_RESUME_ACK_MS 50
@@ -219,7 +218,7 @@ static struct device *ish_resume_device;
* in that case a simple resume message is enough, otherwise we need
* a reset sequence.
*/
-static void ish_resume_handler(struct work_struct *work)
+static void __maybe_unused ish_resume_handler(struct work_struct *work)
{
struct pci_dev *pdev = to_pci_dev(ish_resume_device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -261,7 +260,7 @@ static void ish_resume_handler(struct work_struct *work)
*
* Return: 0 to the pm core
*/
-static int ish_suspend(struct device *device)
+static int __maybe_unused ish_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -287,7 +286,7 @@ static int ish_suspend(struct device *device)
return 0;
}
-static DECLARE_WORK(resume_work, ish_resume_handler);
+static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
/**
* ish_resume() - ISH resume callback
* @device: device pointer
@@ -296,7 +295,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler);
*
* Return: 0 to the pm core
*/
-static int ish_resume(struct device *device)
+static int __maybe_unused ish_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -310,21 +309,14 @@ static int ish_resume(struct device *device)
return 0;
}
-static const struct dev_pm_ops ish_pm_ops = {
- .suspend = ish_suspend,
- .resume = ish_resume,
-};
-#define ISHTP_ISH_PM_OPS (&ish_pm_ops)
-#else
-#define ISHTP_ISH_PM_OPS NULL
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
static struct pci_driver ish_driver = {
.name = KBUILD_MODNAME,
.id_table = ish_pci_tbl,
.probe = ish_probe,
.remove = ish_remove,
- .driver.pm = ISHTP_ISH_PM_OPS,
+ .driver.pm = &ish_pm_ops,
};
module_pci_driver(ish_driver);
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 5f382fedc2ab..bcb29b69aee7 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -416,7 +416,7 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
list_del(&device->device_link);
spin_unlock_irqrestore(&dev->device_list_lock, flags);
dev_err(dev->devc, "Failed to register ISHTP client device\n");
- kfree(device);
+ put_device(&device->dev);
return NULL;
}
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 8f4fd924af1f..aec2566dfa41 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -284,6 +284,14 @@ static void wacom_usage_mapping(struct hid_device *hdev,
}
}
+ /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
+ if (hdev->vendor == USB_VENDOR_ID_WACOM &&
+ hdev->product == 0x0358 &&
+ WACOM_PEN_FIELD(field) &&
+ wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
+ field->logical_maximum = 43200;
+ }
+
switch (usage->hid) {
case HID_GD_X:
features->x_max = field->logical_maximum;
@@ -1102,8 +1110,10 @@ static int __wacom_devm_sysfs_create_group(struct wacom *wacom,
devres->root = root;
error = sysfs_create_group(devres->root, group);
- if (error)
+ if (error) {
+ devres_free(devres);
return error;
+ }
devres_add(&wacom->hdev->dev, devres);
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 217c78711d65..95ccef05d230 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -564,10 +564,10 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap,
* TODO: We could potentially loop and retry in the case
* of MSP_TWI_XFER_TIMEOUT.
*/
- return -1;
+ return -EIO;
}
- return 0;
+ return num;
}
static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter)
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index e4be86b3de9a..7235c7302bb7 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -337,7 +337,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
}
mutex_unlock(&vb->lock);
}
- return 0;
+ return num;
error:
mutex_unlock(&vb->lock);
return error;
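Both i2c fixes enforce the same master_xfer() contract: return the number of messages completed on success and a negative errno (never a bare -1, never 0 for success) on failure. A minimal sketch; the per-message transfer helper is hypothetical:

	static int example_xfer(struct i2c_adapter *adap,
				struct i2c_msg *msgs, int num)
	{
		int i;

		for (i = 0; i < num; i++) {
			if (do_one_msg(adap, &msgs[i]) < 0)	/* hypothetical helper */
				return -EIO;	/* an errno, never -1 */
		}
		return num;	/* success: count of messages, never 0 */
	}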
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 47c3d7f32900..07246a6037e3 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -348,55 +348,6 @@ static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39,
static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0,
33, 0, 17, 16, 12, 10, 8, 6, 4};
-static ssize_t ad7793_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7793_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n",
- st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
-}
-
-static ssize_t ad7793_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7793_state *st = iio_priv(indio_dev);
- long lval;
- int i, ret;
-
- ret = kstrtol(buf, 10, &lval);
- if (ret)
- return ret;
-
- if (lval == 0)
- return -EINVAL;
-
- for (i = 0; i < 16; i++)
- if (lval == st->chip_info->sample_freq_avail[i])
- break;
- if (i == 16)
- return -EINVAL;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- st->mode &= ~AD7793_MODE_RATE(-1);
- st->mode |= AD7793_MODE_RATE(i);
- ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode);
- iio_device_release_direct_mode(indio_dev);
-
- return len;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- ad7793_read_frequency,
- ad7793_write_frequency);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
"470 242 123 62 50 39 33 19 17 16 12 10 8 6 4");
@@ -424,7 +375,6 @@ static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
ad7793_show_scale_available, NULL, 0);
static struct attribute *ad7793_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
NULL
@@ -435,7 +385,6 @@ static const struct attribute_group ad7793_attribute_group = {
};
static struct attribute *ad7797_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr,
NULL
};
@@ -505,6 +454,10 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
*val -= offset;
}
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->chip_info
+ ->sample_freq_avail[AD7793_MODE_RATE(st->mode)];
+ return IIO_VAL_INT;
}
return -EINVAL;
}
@@ -542,6 +495,26 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
break;
}
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!val) {
+ ret = -EINVAL;
+ break;
+ }
+
+ for (i = 0; i < 16; i++)
+ if (val == st->chip_info->sample_freq_avail[i])
+ break;
+
+ if (i == 16) {
+ ret = -EINVAL;
+ break;
+ }
+
+ st->mode &= ~AD7793_MODE_RATE(-1);
+ st->mode |= AD7793_MODE_RATE(i);
+ ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode),
+ st->mode);
+ break;
default:
ret = -EINVAL;
}
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index 047fe757ab97..d0cfce1a3cfa 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -27,6 +27,13 @@ static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
if ((length == 0) || (bytes_per_datum == 0))
return -EINVAL;
+ /*
+ * Make sure we don't overflow an unsigned int after kfifo rounds up to
+ * the next power of 2.
+ */
+ if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
+ return -EINVAL;
+
return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
bytes_per_datum, GFP_KERNEL);
}
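The check works because __kfifo_alloc() first rounds length up to a power of two and then multiplies by bytes_per_datum; comparing the rounded length against UINT_MAX / bytes_per_datum rejects any product that would wrap. A worked instance of the failure it closes (values illustrative):

	/* length = 0x80000001, bytes_per_datum = 4:
	 * roundup_pow_of_two(length) == 2^32, and 2^32 > UINT_MAX / 4,
	 * so the allocation is refused with -EINVAL instead of letting
	 * the byte-size computation wrap to a tiny buffer. */
	if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
		return -EINVAL;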
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index a1ebb58a095d..be2cbd8b0114 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -126,7 +126,7 @@ static const struct xpad_device {
u8 mapping;
u8 xtype;
} xpad_device[] = {
- { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
+ { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 47d93a938dde..540a72a5bea3 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -163,14 +163,27 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
}
}
+static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
+{
+ /*
+ * Return the size of the video probe and commit controls, which depends
+ * on the protocol version.
+ */
+ if (stream->dev->uvc_version < 0x0110)
+ return 26;
+ else if (stream->dev->uvc_version < 0x0150)
+ return 34;
+ else
+ return 48;
+}
+
static int uvc_get_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl, int probe, __u8 query)
{
__u8 *data;
- __u16 size;
+ __u16 size = uvc_video_ctrl_size(stream);
int ret;
- size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) &&
query == UVC_GET_DEF)
return -EIO;
@@ -225,7 +238,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]);
ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]);
- if (size == 34) {
+ if (size >= 34) {
ctrl->dwClockFrequency = get_unaligned_le32(&data[26]);
ctrl->bmFramingInfo = data[30];
ctrl->bPreferedVersion = data[31];
@@ -255,10 +268,9 @@ static int uvc_set_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl, int probe)
{
__u8 *data;
- __u16 size;
+ __u16 size = uvc_video_ctrl_size(stream);
int ret;
- size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
data = kzalloc(size, GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -275,7 +287,7 @@ static int uvc_set_video_ctrl(struct uvc_streaming *stream,
put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]);
put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]);
- if (size == 34) {
+ if (size >= 34) {
put_unaligned_le32(ctrl->dwClockFrequency, &data[26]);
data[30] = ctrl->bmFramingInfo;
data[31] = ctrl->bPreferedVersion;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6cafcb9d6004..47c3801440b2 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3242,7 +3242,7 @@ int sdhci_setup_host(struct sdhci_host *host)
override_timeout_clk = host->timeout_clk;
if (host->version > SDHCI_SPEC_300) {
- pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
+ pr_info("%s: Unknown controller version (%d). You may experience problems.\n",
mmc_hostname(mmc), host->version);
}
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 5590c559a8ca..53e320c92a8b 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -91,6 +91,7 @@
#define HI3110_STAT_BUSOFF BIT(2)
#define HI3110_STAT_ERRP BIT(3)
#define HI3110_STAT_ERRW BIT(4)
+#define HI3110_STAT_TXMTY BIT(7)
#define HI3110_BTR0_SJW_SHIFT 6
#define HI3110_BTR0_BRP_SHIFT 0
@@ -427,8 +428,10 @@ static int hi3110_get_berr_counter(const struct net_device *net,
struct hi3110_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
+ mutex_lock(&priv->hi3110_lock);
bec->txerr = hi3110_read(spi, HI3110_READ_TEC);
bec->rxerr = hi3110_read(spi, HI3110_READ_REC);
+ mutex_unlock(&priv->hi3110_lock);
return 0;
}
@@ -735,10 +738,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
}
}
- if (intf == 0)
- break;
-
- if (intf & HI3110_INT_TXCPLT) {
+ if (priv->tx_len && statf & HI3110_STAT_TXMTY) {
net->stats.tx_packets++;
net->stats.tx_bytes += priv->tx_len - 1;
can_led_event(net, CAN_LED_EVENT_TX);
@@ -748,6 +748,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
}
netif_wake_queue(net);
}
+
+ if (intf == 0)
+ break;
}
mutex_unlock(&priv->hi3110_lock);
return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 38392a520725..38dc075ae095 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -420,6 +420,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev)
struct net_device *netdev = pdata->netdev;
int ret = 0;
+ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
+
pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index e278e3d96ee0..d8ada37ba04b 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev)
/* Optional regulator for PHY */
priv->regulator = devm_regulator_get_optional(dev, "phy");
if (IS_ERR(priv->regulator)) {
- if (PTR_ERR(priv->regulator) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto out_clk_disable;
+ }
dev_err(dev, "no regulator found\n");
priv->regulator = NULL;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index b7eb4ded5486..47f1cd2c7974 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -809,9 +809,11 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
struct net_device *ndev = priv->netdev;
- unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int txbds_processed = 0;
struct bcm_sysport_cb *cb;
+ unsigned int txbds_ready;
+ unsigned int c_index;
u32 hw_ind;
/* Clear status before servicing to reduce spurious interrupts */
@@ -824,29 +826,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
- ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
-
- last_c_index = ring->c_index;
- num_tx_cbs = ring->size;
-
- c_index &= (num_tx_cbs - 1);
-
- if (c_index >= last_c_index)
- last_tx_cn = c_index - last_c_index;
- else
- last_tx_cn = num_tx_cbs - last_c_index + c_index;
+ txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
netif_dbg(priv, tx_done, ndev,
- "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
- ring->index, c_index, last_tx_cn, last_c_index);
+ "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+ ring->index, ring->c_index, c_index, txbds_ready);
- while (last_tx_cn-- > 0) {
- cb = ring->cbs + last_c_index;
+ while (txbds_processed < txbds_ready) {
+ cb = &ring->cbs[ring->clean_index];
bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
ring->desc_count++;
- last_c_index++;
- last_c_index &= (num_tx_cbs - 1);
+ txbds_processed++;
+
+ if (likely(ring->clean_index < ring->size - 1))
+ ring->clean_index++;
+ else
+ ring->clean_index = 0;
}
ring->c_index = c_index;
@@ -1354,6 +1350,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
ring->index = index;
ring->size = size;
+ ring->clean_index = 0;
ring->alloc_size = ring->size;
ring->desc_cpu = p;
ring->desc_count = ring->size;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 77a51c167a69..ceda51523192 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -689,7 +689,7 @@ struct bcm_sysport_tx_ring {
unsigned int desc_count; /* Number of descriptors */
unsigned int curr_desc; /* Current descriptor */
unsigned int c_index; /* Last consumer index */
- unsigned int p_index; /* Current producer index */
+ unsigned int clean_index; /* Current clean index */
struct bcm_sysport_cb *cbs; /* Transmit control blocks */
struct dma_desc *desc_cpu; /* CPU view of the descriptor */
struct bcm_sysport_priv *priv; /* private context backpointer */
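The bcmsysport rewrite replaces the fragile two-branch wrap computation with masked modular arithmetic: since both the hardware consumer index and the driver's cached c_index live within RING_CONS_INDEX_MASK, their masked difference is the number of completed descriptors even across wraparound. A standalone sketch (the mask width here is illustrative):

	#define RING_CONS_INDEX_MASK 0xffff	/* illustrative width */

	static unsigned int txbds_ready(unsigned int hw_c_index,
					unsigned int sw_c_index)
	{
		/* Modular subtraction handles wrap: e.g.
		 * (0x0005 - 0xfffe) & 0xffff == 7. */
		return (hw_c_index - sw_c_index) & RING_CONS_INDEX_MASK;
	}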
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index d9e3fcec3d8b..dd1ffabc693f 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1986,7 +1986,6 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
}
if (unlikely(err < 0)) {
- percpu_stats->tx_errors++;
percpu_stats->tx_fifo_errors++;
return err;
}
@@ -2254,7 +2253,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
/* prefetch the first 64 bytes of the frame or the SGT start */
prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd));
- fd_format = qm_fd_get_format(fd);
/* The only FD types that we may receive are contig and S/G */
WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
@@ -2275,8 +2273,10 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
skb_len = skb->len;
- if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+ if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
+ percpu_stats->rx_dropped++;
return qman_cb_dqrr_consume;
+ }
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += skb_len;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ac2a6377ce2a..d9ac22ec2304 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3539,6 +3539,8 @@ fec_drv_remove(struct platform_device *pdev)
fec_enet_mii_remove(fep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 6d06331a049a..6bd7afeae1ac 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3545,15 +3545,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
}
break;
case e1000_pch_spt:
- if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
- /* Stable 24MHz frequency */
- incperiod = INCPERIOD_24MHZ;
- incvalue = INCVALUE_24MHZ;
- shift = INCVALUE_SHIFT_24MHZ;
- adapter->cc.shift = shift;
- break;
- }
- return -EINVAL;
+ /* Stable 24MHz frequency */
+ incperiod = INCPERIOD_24MHZ;
+ incvalue = INCVALUE_24MHZ;
+ shift = INCVALUE_SHIFT_24MHZ;
+ adapter->cc.shift = shift;
+ break;
case e1000_pch_cnp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d0bf72f5c66b..b12900a06c57 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -934,6 +934,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
+void i40e_client_update_msix_info(struct i40e_pf *pf);
int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id);
/**
* i40e_irq_dynamic_enable - Enable default interrupt generation settings
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 1b1e2acbd07f..b07770d53940 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -287,6 +287,17 @@ out:
return capable;
}
+void i40e_client_update_msix_info(struct i40e_pf *pf)
+{
+ struct i40e_client_instance *cdev = pf->cinst;
+
+ if (!cdev || !cdev->client)
+ return;
+
+ cdev->lan_info.msix_count = pf->num_iwarp_msix;
+ cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
+}
+
/**
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
@@ -328,9 +339,6 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
return;
}
- cdev->lan_info.msix_count = pf->num_iwarp_msix;
- cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
-
mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
struct netdev_hw_addr, list);
if (mac)
@@ -340,6 +348,8 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
cdev->client = registered_client;
pf->cinst = cdev;
+
+ i40e_client_update_msix_info(pf);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 9f63e4e1bfe7..f8343bff0406 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1601,7 +1601,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_pf *pf = vsi->back;
u16 sections = 0;
u8 netdev_tc = 0;
- u16 numtc = 0;
+ u16 numtc = 1;
u16 qcount;
u8 offset;
u16 qmap;
@@ -1611,9 +1611,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
offset = 0;
+ /* Number of queues per enabled TC */
+ num_tc_qps = vsi->alloc_queue_pairs;
if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
/* Find numtc from enabled TC bitmap */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (enabled_tc & BIT(i)) /* TC is enabled */
numtc++;
}
@@ -1621,18 +1623,17 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
numtc = 1;
}
- } else {
- /* At least TC0 is enabled in case of non-DCB case */
- numtc = 1;
+ num_tc_qps = num_tc_qps / numtc;
+ num_tc_qps = min_t(int, num_tc_qps,
+ i40e_pf_get_max_q_per_tc(pf));
}
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
- /* Number of queues per enabled TC */
- qcount = vsi->alloc_queue_pairs;
- num_tc_qps = qcount / numtc;
- num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
+ /* Do not allow using more TC queue pairs than MSI-X vectors exist */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -1643,9 +1644,13 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
switch (vsi->type) {
case I40E_VSI_MAIN:
- qcount = min_t(int, pf->alloc_rss_size,
- num_tc_qps);
- break;
+ if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED)) ||
+ vsi->tc_config.enabled_tc != 1) {
+ qcount = min_t(int, pf->alloc_rss_size,
+ num_tc_qps);
+ break;
+ }
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
@@ -10744,6 +10749,9 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
vsi->veb_idx = veb->idx;
vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+ i40e_client_update_msix_info(pf);
+
return 0;
}
@@ -12110,6 +12118,11 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
+ /* Client close must be called explicitly here because the timer
+ * has been stopped.
+ */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 997189cfe7fd..54f57dc618f6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -337,13 +337,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_SB_CM(10000, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index b538b10e095d..c114727b3400 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -595,14 +595,16 @@ static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
u32 tb_id)
{
+ struct mlxsw_sp_fib *fib4;
struct mlxsw_sp_vr *vr;
vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
if (!vr)
return ERR_PTR(-EBUSY);
- vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
- if (IS_ERR(vr->fib4))
- return ERR_CAST(vr->fib4);
+ fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(fib4))
+ return ERR_CAST(fib4);
+ vr->fib4 = fib4;
vr->tb_id = tb_id;
return vr;
}
@@ -847,11 +849,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
dipn = htonl(dip);
dev = mlxsw_sp->rifs[rif]->dev;
n = neigh_lookup(&arp_tbl, &dipn, dev);
- if (!n) {
- netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
- &dip);
+ if (!n)
return;
- }
netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
neigh_event_send(n, NULL);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index f4bb0c0b7c1d..db980dc2478f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -882,6 +882,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool dynamic)
{
char *sfd_pl;
+ u8 num_rec;
int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -891,9 +892,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, fid, action, local_port);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
- kfree(sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+out:
+ kfree(sfd_pl);
return err;
}
@@ -918,6 +926,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
bool adding, bool dynamic)
{
char *sfd_pl;
+ u8 num_rec;
int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -928,9 +937,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
lag_vid, lag_id);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
- kfree(sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+out:
+ kfree(sfd_pl);
return err;
}
@@ -964,6 +980,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
u16 fid, u16 mid, bool adding)
{
char *sfd_pl;
+ u8 num_rec;
int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -973,7 +990,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
MLXSW_REG_SFD_REC_ACTION_NOP, mid);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+
+out:
kfree(sfd_pl);
return err;
}
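All three SFD hunks above apply the same consistency check: record how many records were packed into the register payload before the write, then compare against the count the device reports back; if the device consumed fewer records, fail with -EBUSY so the caller can react. A small userspace model of the pattern, with stub stand-ins for mlxsw_reg_sfd_num_rec_get() and mlxsw_reg_write():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct sfd_payload {
	unsigned char num_rec;	/* stands in for the packed record count */
};

static unsigned char sfd_num_rec_get(const struct sfd_payload *pl)
{
	return pl->num_rec;
}

/* Stub "register write": the device may process fewer records than
 * were packed and updates the count in the payload accordingly. */
static int reg_write(struct sfd_payload *pl)
{
	if (pl->num_rec > 1)
		pl->num_rec = 1;
	return 0;
}

static int fdb_op(unsigned char packed)
{
	struct sfd_payload *pl;
	unsigned char num_rec;
	int err;

	pl = calloc(1, sizeof(*pl));
	if (!pl)
		return -ENOMEM;

	pl->num_rec = packed;
	num_rec = sfd_num_rec_get(pl);	/* snapshot before the write */
	err = reg_write(pl);
	if (err)
		goto out;

	if (num_rec != sfd_num_rec_get(pl))
		err = -EBUSY;		/* device consumed fewer records */
out:
	free(pl);
	return err;
}

int main(void)
{
	printf("one record:  %d\n", fdb_op(1));	/* 0 */
	printf("two records: %d\n", fdb_op(2));	/* -EBUSY */
	return 0;
}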
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index bab13613b138..cc9cb7adc91d 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2826,6 +2826,12 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_alloc_ordered_workqueue;
}
+ err = rocker_probe_ports(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "failed to probe ports\n");
+ goto err_probe_ports;
+ }
+
/* Only FIBs pointing to our own netdevs are programmed into
* the device, so no need to pass a callback.
*/
@@ -2836,20 +2842,14 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
- err = rocker_probe_ports(rocker);
- if (err) {
- dev_err(&pdev->dev, "failed to probe ports\n");
- goto err_probe_ports;
- }
-
dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
(int)sizeof(rocker->hw.id), &rocker->hw.id);
return 0;
-err_probe_ports:
- unregister_fib_notifier(&rocker->fib_nb);
err_register_fib_notifier:
+ rocker_remove_ports(rocker);
+err_probe_ports:
destroy_workqueue(rocker->rocker_owq);
err_alloc_ordered_workqueue:
free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
@@ -2877,8 +2877,8 @@ static void rocker_remove(struct pci_dev *pdev)
{
struct rocker *rocker = pci_get_drvdata(pdev);
- rocker_remove_ports(rocker);
unregister_fib_notifier(&rocker->fib_nb);
+ rocker_remove_ports(rocker);
rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
destroy_workqueue(rocker->rocker_owq);
free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
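The rocker hunks reorder setup so rocker_probe_ports() runs before the FIB notifier is registered, and the unwind labels are reshuffled to match: each error label undoes the steps that completed before the failing one, in reverse order, and rocker_remove() mirrors the same teardown order. A compact sketch of the idiom, with made-up step names:

#include <stdio.h>

static int step_a(void) { return 0; }	/* e.g. probe ports */
static int step_b(void) { return -1; }	/* e.g. register notifier; fails here */
static void undo_a(void) { puts("undo step a"); }

static int probe(void)
{
	int err;

	err = step_a();
	if (err)
		goto err_a;

	err = step_b();
	if (err)
		goto err_b;

	return 0;

err_b:
	undo_a();	/* a later failure unwinds the earlier step */
err_a:
	return err;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}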
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index e84faa83d8ea..4a5c44d7e9f7 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -996,7 +996,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
/* set speed_in input in case RMII mode is used in 100Mbps */
if (phy->speed == 100)
mac_control |= BIT(15);
- else if (phy->speed == 10)
+ /* in-band mode only works in 10Mbps RGMII mode */
+ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
mac_control |= BIT(18); /* In Band mode */
if (priv->rx_pause)
@@ -1618,6 +1619,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
q_idx = q_idx % cpsw->tx_ch_num;
txch = cpsw->txv[q_idx].ch;
+ txq = netdev_get_tx_queue(ndev, q_idx);
ret = cpsw_tx_packet_submit(priv, skb, txch);
if (unlikely(ret != 0)) {
cpsw_err(priv, tx_err, "desc submit failed\n");
@@ -1628,15 +1630,26 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
* tell the kernel to stop sending us tx frames.
*/
if (unlikely(!cpdma_check_free_tx_desc(txch))) {
- txq = netdev_get_tx_queue(ndev, q_idx);
netif_tx_stop_queue(txq);
+
+ /* Barrier, so that the stopped queue state is visible to other CPUs */
+ smp_mb__after_atomic();
+
+ if (cpdma_check_free_tx_desc(txch))
+ netif_tx_wake_queue(txq);
}
return NETDEV_TX_OK;
fail:
ndev->stats.tx_dropped++;
- txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
netif_tx_stop_queue(txq);
+
+ /* Barrier, so that the stopped queue state is visible to other CPUs */
+ smp_mb__after_atomic();
+
+ if (cpdma_check_free_tx_desc(txch))
+ netif_tx_wake_queue(txq);
+
return NETDEV_TX_BUSY;
}
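Both the normal path and the failure path in cpsw_ndo_start_xmit() now stop the queue, issue a barrier, and re-check for free descriptors, closing the window where a completion frees descriptors between the check and the stop and nobody restarts the queue. A userspace approximation of the idiom using C11 atomics; atomic_thread_fence() stands in for smp_mb__after_atomic(), and all names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int free_descs;
static atomic_bool queue_stopped;

/* Producer side: mirrors the stop/recheck in the xmit path. */
static void maybe_stop_queue(void)
{
	if (atomic_load(&free_descs) == 0) {
		atomic_store(&queue_stopped, true);

		/* Make the stop visible before re-checking descriptors. */
		atomic_thread_fence(memory_order_seq_cst);

		/* A completion may have freed descriptors meanwhile;
		 * re-check so the queue is not left stopped forever. */
		if (atomic_load(&free_descs) > 0)
			atomic_store(&queue_stopped, false);
	}
}

/* Completion side: frees a descriptor, then wakes the queue. */
static void complete_one(void)
{
	atomic_fetch_add(&free_descs, 1);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&queue_stopped))
		atomic_store(&queue_stopped, false);
}

int main(void)
{
	maybe_stop_queue();
	printf("stopped after xmit: %d\n", atomic_load(&queue_stopped));
	complete_one();
	printf("stopped after completion: %d\n", atomic_load(&queue_stopped));
	return 0;
}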
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 18a17e309de9..316cb8b29d68 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1037,7 +1037,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
lowerdev_features &= (features | ~NETIF_F_LRO);
features = netdev_increment_features(lowerdev_features, features, mask);
features |= ALWAYS_ON_FEATURES;
- features &= ~NETIF_F_NETNS_LOCAL;
+ features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES);
return features;
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 2620f3df37ee..696bb74c3384 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -769,6 +769,91 @@ static void phy_error(struct phy_device *phydev)
}
/**
+ * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
+ * @phydev: target phy_device struct
+ */
+static int phy_disable_interrupts(struct phy_device *phydev)
+{
+ int err;
+
+ /* Disable PHY interrupts */
+ err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
+ if (err)
+ goto phy_err;
+
+ /* Clear the interrupt */
+ err = phy_clear_interrupt(phydev);
+ if (err)
+ goto phy_err;
+
+ return 0;
+
+phy_err:
+ phy_error(phydev);
+
+ return err;
+}
+
+/**
+ * phy_change - Called by the phy_interrupt to handle PHY changes
+ * @phydev: phy_device struct that interrupted
+ */
+static irqreturn_t phy_change(struct phy_device *phydev)
+{
+ if (phy_interrupt_is_valid(phydev)) {
+ if (phydev->drv->did_interrupt &&
+ !phydev->drv->did_interrupt(phydev))
+ goto ignore;
+
+ if (phy_disable_interrupts(phydev))
+ goto phy_err;
+ }
+
+ mutex_lock(&phydev->lock);
+ if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
+ phydev->state = PHY_CHANGELINK;
+ mutex_unlock(&phydev->lock);
+
+ if (phy_interrupt_is_valid(phydev)) {
+ atomic_dec(&phydev->irq_disable);
+ enable_irq(phydev->irq);
+
+ /* Reenable interrupts */
+ if (PHY_HALTED != phydev->state &&
+ phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
+ goto irq_enable_err;
+ }
+
+ /* reschedule state queue work to run as soon as possible */
+ phy_trigger_machine(phydev, true);
+ return IRQ_HANDLED;
+
+ignore:
+ atomic_dec(&phydev->irq_disable);
+ enable_irq(phydev->irq);
+ return IRQ_NONE;
+
+irq_enable_err:
+ disable_irq(phydev->irq);
+ atomic_inc(&phydev->irq_disable);
+phy_err:
+ phy_error(phydev);
+ return IRQ_NONE;
+}
+
+/**
+ * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
+ * @work: work_struct that describes the work to be done
+ */
+void phy_change_work(struct work_struct *work)
+{
+ struct phy_device *phydev =
+ container_of(work, struct phy_device, phy_queue);
+
+ phy_change(phydev);
+}
+
+/**
* phy_interrupt - PHY interrupt handler
* @irq: interrupt line
* @phy_dat: phy_device pointer
@@ -786,9 +871,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
disable_irq_nosync(irq);
atomic_inc(&phydev->irq_disable);
- phy_change(phydev);
-
- return IRQ_HANDLED;
+ return phy_change(phydev);
}
/**
@@ -806,32 +889,6 @@ static int phy_enable_interrupts(struct phy_device *phydev)
}
/**
- * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
- * @phydev: target phy_device struct
- */
-static int phy_disable_interrupts(struct phy_device *phydev)
-{
- int err;
-
- /* Disable PHY interrupts */
- err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
- if (err)
- goto phy_err;
-
- /* Clear the interrupt */
- err = phy_clear_interrupt(phydev);
- if (err)
- goto phy_err;
-
- return 0;
-
-phy_err:
- phy_error(phydev);
-
- return err;
-}
-
-/**
* phy_start_interrupts - request and enable interrupts for a PHY device
* @phydev: target phy_device struct
*
@@ -882,64 +939,6 @@ int phy_stop_interrupts(struct phy_device *phydev)
EXPORT_SYMBOL(phy_stop_interrupts);
/**
- * phy_change - Called by the phy_interrupt to handle PHY changes
- * @phydev: phy_device struct that interrupted
- */
-void phy_change(struct phy_device *phydev)
-{
- if (phy_interrupt_is_valid(phydev)) {
- if (phydev->drv->did_interrupt &&
- !phydev->drv->did_interrupt(phydev))
- goto ignore;
-
- if (phy_disable_interrupts(phydev))
- goto phy_err;
- }
-
- mutex_lock(&phydev->lock);
- if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
- phydev->state = PHY_CHANGELINK;
- mutex_unlock(&phydev->lock);
-
- if (phy_interrupt_is_valid(phydev)) {
- atomic_dec(&phydev->irq_disable);
- enable_irq(phydev->irq);
-
- /* Reenable interrupts */
- if (PHY_HALTED != phydev->state &&
- phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
- goto irq_enable_err;
- }
-
- /* reschedule state queue work to run as soon as possible */
- phy_trigger_machine(phydev, true);
- return;
-
-ignore:
- atomic_dec(&phydev->irq_disable);
- enable_irq(phydev->irq);
- return;
-
-irq_enable_err:
- disable_irq(phydev->irq);
- atomic_inc(&phydev->irq_disable);
-phy_err:
- phy_error(phydev);
-}
-
-/**
- * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
- * @work: work_struct that describes the work to be done
- */
-void phy_change_work(struct work_struct *work)
-{
- struct phy_device *phydev =
- container_of(work, struct phy_device, phy_queue);
-
- phy_change(phydev);
-}
-
-/**
* phy_stop - Bring down the PHY link, and stop checking the status
* @phydev: target phy_device struct
*/
@@ -996,7 +995,7 @@ void phy_start(struct phy_device *phydev)
break;
case PHY_HALTED:
/* make sure interrupts are re-enabled for the PHY */
- if (phydev->irq != PHY_POLL) {
+ if (phy_interrupt_is_valid(phydev)) {
err = phy_enable_interrupts(phydev);
if (err < 0)
break;
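The phy.c rework makes phy_change() return an irqreturn_t and lets phy_interrupt() propagate it, so a shared interrupt line sees IRQ_NONE for events this PHY did not raise instead of an unconditional IRQ_HANDLED. A toy model of the propagation; the enum and function names here are stand-ins, not the kernel's definitions:

#include <stdio.h>

typedef enum { IRQ_NONE, IRQ_HANDLED } irqreturn_t;

/* Reports whether the PHY actually raised this interrupt. */
static irqreturn_t phy_change_model(int did_interrupt)
{
	if (!did_interrupt)
		return IRQ_NONE;	/* spurious: not our interrupt */
	/* ... handle the link change ... */
	return IRQ_HANDLED;
}

/* The top-level handler now just forwards the verdict. */
static irqreturn_t phy_interrupt_model(int did_interrupt)
{
	return phy_change_model(did_interrupt);
}

int main(void)
{
	printf("spurious: %d\n", phy_interrupt_model(0));	/* IRQ_NONE */
	printf("genuine:  %d\n", phy_interrupt_model(1));	/* IRQ_HANDLED */
	return 0;
}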
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 1df74ceea4aa..93c42fd4957d 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -256,7 +256,7 @@ struct ppp_net {
/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg);
-static void ppp_xmit_process(struct ppp *ppp);
+static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
@@ -512,13 +512,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
goto out;
}
- skb_queue_tail(&pf->xq, skb);
-
switch (pf->kind) {
case INTERFACE:
- ppp_xmit_process(PF_TO_PPP(pf));
+ ppp_xmit_process(PF_TO_PPP(pf), skb);
break;
case CHANNEL:
+ skb_queue_tail(&pf->xq, skb);
ppp_channel_push(PF_TO_CHANNEL(pf));
break;
}
@@ -1002,17 +1001,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
if (!ifname_is_set)
snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
+ mutex_unlock(&pn->all_ppp_mutex);
+
ret = register_netdevice(ppp->dev);
if (ret < 0)
goto err_unit;
atomic_inc(&ppp_unit_count);
- mutex_unlock(&pn->all_ppp_mutex);
-
return 0;
err_unit:
+ mutex_lock(&pn->all_ppp_mutex);
unit_put(&pn->units_idr, ppp->file.index);
err:
mutex_unlock(&pn->all_ppp_mutex);
@@ -1261,8 +1261,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
put_unaligned_be16(proto, pp);
skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
- skb_queue_tail(&ppp->file.xq, skb);
- ppp_xmit_process(ppp);
+ ppp_xmit_process(ppp, skb);
+
return NETDEV_TX_OK;
outf:
@@ -1414,13 +1414,14 @@ static void ppp_setup(struct net_device *dev)
*/
/* Called to do any work queued up on the transmit side that can now be done */
-static void __ppp_xmit_process(struct ppp *ppp)
+static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
- struct sk_buff *skb;
-
ppp_xmit_lock(ppp);
if (!ppp->closing) {
ppp_push(ppp);
+
+ if (skb)
+ skb_queue_tail(&ppp->file.xq, skb);
while (!ppp->xmit_pending &&
(skb = skb_dequeue(&ppp->file.xq)))
ppp_send_frame(ppp, skb);
@@ -1434,7 +1435,7 @@ static void __ppp_xmit_process(struct ppp *ppp)
ppp_xmit_unlock(ppp);
}
-static void ppp_xmit_process(struct ppp *ppp)
+static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
local_bh_disable();
@@ -1442,7 +1443,7 @@ static void ppp_xmit_process(struct ppp *ppp)
goto err;
(*this_cpu_ptr(ppp->xmit_recursion))++;
- __ppp_xmit_process(ppp);
+ __ppp_xmit_process(ppp, skb);
(*this_cpu_ptr(ppp->xmit_recursion))--;
local_bh_enable();
@@ -1452,6 +1453,8 @@ static void ppp_xmit_process(struct ppp *ppp)
err:
local_bh_enable();
+ kfree_skb(skb);
+
if (net_ratelimit())
netdev_err(ppp->dev, "recursion detected\n");
}
@@ -1936,7 +1939,7 @@ static void __ppp_channel_push(struct channel *pch)
if (skb_queue_empty(&pch->file.xq)) {
ppp = pch->ppp;
if (ppp)
- __ppp_xmit_process(ppp);
+ __ppp_xmit_process(ppp, NULL);
}
}
@@ -3155,6 +3158,15 @@ ppp_connect_channel(struct channel *pch, int unit)
goto outl;
ppp_lock(ppp);
+ spin_lock_bh(&pch->downl);
+ if (!pch->chan) {
+ /* Don't connect unregistered channels */
+ spin_unlock_bh(&pch->downl);
+ ppp_unlock(ppp);
+ ret = -ENOTCONN;
+ goto outl;
+ }
+ spin_unlock_bh(&pch->downl);
if (pch->file.hdrlen > ppp->file.hdrlen)
ppp->file.hdrlen = pch->file.hdrlen;
hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
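The ppp_generic changes thread the skb through to __ppp_xmit_process() so it is enqueued only after ppp_xmit_lock() is held; queueing before taking the lock let a concurrent sender grab the lock first, dequeue the packet, and transmit frames out of order. A rough single-file model of enqueue-under-the-lock, where a plain mutex and ring buffer stand in for the xmit lock and skb queue:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t xmit_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue[16];
static int head, tail;

/* pkt < 0 means "no new packet, just flush", like the NULL skb
 * passed down from __ppp_channel_push(). */
static void xmit_process(int pkt)
{
	pthread_mutex_lock(&xmit_lock);
	if (pkt >= 0)
		queue[tail++ % 16] = pkt;	/* enqueue under the lock */
	while (head < tail)
		printf("send %d\n", queue[head++ % 16]);
	pthread_mutex_unlock(&xmit_lock);
}

int main(void)
{
	xmit_process(1);
	xmit_process(2);
	xmit_process(-1);	/* flush only */
	return 0;
}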
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 4e1da1645b15..5aa59f41bf8c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
struct pppoe_hdr *ph;
struct net_device *dev;
char *start;
+ int hlen;
lock_sock(sk);
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
@@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
if (total_len > (dev->mtu + dev->hard_header_len))
goto end;
-
- skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
- 0, GFP_KERNEL);
+ hlen = LL_RESERVED_SPACE(dev);
+ skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
+ dev->needed_tailroom, 0, GFP_KERNEL);
if (!skb) {
error = -ENOMEM;
goto end;
}
/* Reserve space for headers. */
- skb_reserve(skb, dev->hard_header_len);
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
skb->dev = dev;
@@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
/* Copy the data if there is no space for the header or if it's
* read-only.
*/
- if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
+ if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
goto abort;
__skb_push(skb, sizeof(*ph));
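The pppoe allocation now budgets LL_RESERVED_SPACE(dev) of headroom (the hard header length rounded up for alignment, plus any extra headroom the device requests) along with the PPPoE header and the device's needed tailroom, rather than the old hard_header_len + 32 guess. A sketch of the size arithmetic with a simplified LL_RESERVED_SPACE and illustrative numbers:

#include <stdio.h>

/* Simplified: the real macro also adds dev->needed_headroom and takes
 * HH_DATA_MOD from the kernel headers. */
#define HH_DATA_MOD 16
#define LL_RESERVED_SPACE(hard_header_len) \
	(((hard_header_len) + HH_DATA_MOD - 1) & ~(HH_DATA_MOD - 1))

int main(void)
{
	unsigned int hard_header_len = 14;	/* Ethernet */
	unsigned int needed_tailroom = 4;	/* hypothetical device */
	unsigned int ph_len = 6;		/* PPPoE header */
	unsigned int total_len = 1492;		/* payload */

	unsigned int hlen = LL_RESERVED_SPACE(hard_header_len);
	unsigned int size = hlen + ph_len + total_len + needed_tailroom;

	printf("headroom %u, allocation %u bytes\n", hlen, size);
	return 0;
}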
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 7852fafdaf09..e6f2a5d4f0d8 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2399,7 +2399,7 @@ send_done:
if (!nlh) {
err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
- goto errout;
+ return err;
goto send_done;
}
@@ -2685,7 +2685,7 @@ send_done:
if (!nlh) {
err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
- goto errout;
+ return err;
goto send_done;
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 07b92985e497..15f056000c57 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1247,6 +1247,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index bb10f573f403..c1b020df4c90 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -758,8 +758,9 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
struct sock *sk,
struct sk_buff *skb)
{
- /* don't divert multicast */
- if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+ /* don't divert multicast or local broadcast */
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
+ ipv4_is_lbcast(ip_hdr(skb)->daddr))
return skb;
if (qdisc_tx_is_default(vrf_dev))
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 0d2e00ece804..f3c1d5245978 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -574,7 +574,10 @@ static void ppp_timer(unsigned long arg)
ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
0, NULL);
proto->restart_counter--;
- } else
+ } else if (netif_carrier_ok(proto->dev))
+ ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+ 0, NULL);
+ else
ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
0, NULL);
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
index 1610722b8099..747eef82cefd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -174,7 +176,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
const struct fw_img *image)
{
- int sec_idx, idx;
+ int sec_idx, idx, ret;
u32 offset = 0;
/*
@@ -201,17 +203,23 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
*/
if (sec_idx >= image->num_sec - 1) {
IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n");
- iwl_free_fw_paging(fwrt);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
/* copy the CSS block to the dram */
IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n",
sec_idx);
+ if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) {
+ IWL_ERR(fwrt, "CSS block is larger than paging size\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block),
image->sec[sec_idx].data,
- fwrt->fw_paging_db[0].fw_paging_size);
+ image->sec[sec_idx].len);
dma_sync_single_for_device(fwrt->trans->dev,
fwrt->fw_paging_db[0].fw_paging_phys,
fwrt->fw_paging_db[0].fw_paging_size,
@@ -232,6 +240,14 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) {
struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
+ if (block->fw_paging_size > image->sec[sec_idx].len - offset) {
+ IWL_ERR(fwrt,
+ "Paging: paging size is larger than remaining data in block %d\n",
+ idx);
+ ret = -EINVAL;
+ goto err;
+ }
+
memcpy(page_address(block->fw_paging_block),
image->sec[sec_idx].data + offset,
block->fw_paging_size);
@@ -242,19 +258,32 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
IWL_DEBUG_FW(fwrt,
"Paging: copied %d paging bytes to block %d\n",
- fwrt->fw_paging_db[idx].fw_paging_size,
- idx);
+ block->fw_paging_size, idx);
+
+ offset += block->fw_paging_size;
- offset += fwrt->fw_paging_db[idx].fw_paging_size;
+ if (offset > image->sec[sec_idx].len) {
+ IWL_ERR(fwrt,
+ "Paging: offset goes over section size\n");
+ ret = -EINVAL;
+ goto err;
+ }
}
/* copy the last paging block */
if (fwrt->num_of_pages_in_last_blk > 0) {
struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
+ if (image->sec[sec_idx].len - offset > block->fw_paging_size) {
+ IWL_ERR(fwrt,
+ "Paging: last block is larger than paging size\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
memcpy(page_address(block->fw_paging_block),
image->sec[sec_idx].data + offset,
- FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk);
+ image->sec[sec_idx].len - offset);
dma_sync_single_for_device(fwrt->trans->dev,
block->fw_paging_phys,
block->fw_paging_size,
@@ -266,6 +295,10 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
}
return 0;
+
+err:
+ iwl_free_fw_paging(fwrt);
+ return ret;
}
static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt,
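Every copy into a fixed-size paging block is now preceded by a length check, and all failures funnel through one err: label that frees the paging memory, so no path can overrun a block or leak the allocation. A self-contained model of the bounds-checked fill loop; sizes and names are invented for illustration:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 8

/* Copy len bytes of sec into fixed-size blocks, refusing to overrun
 * either a block or the section, as the added checks do. */
static int fill_blocks(char blocks[][BLOCK_SIZE], int nblocks,
		       const char *sec, size_t len)
{
	size_t offset = 0;
	int idx;

	for (idx = 0; idx < nblocks && offset < len; idx++) {
		size_t chunk = len - offset;

		if (chunk > BLOCK_SIZE)
			chunk = BLOCK_SIZE;	/* never copy past a block */

		memcpy(blocks[idx], sec + offset, chunk);
		offset += chunk;
	}

	if (offset < len)	/* section larger than all blocks together */
		return -EINVAL;

	return 0;
}

int main(void)
{
	char blocks[2][BLOCK_SIZE];
	const char sec[] = "0123456789ABCDE";	/* 16 bytes with NUL */

	printf("fits:    %d\n", fill_blocks(blocks, 2, sec, sizeof(sec)));
	printf("too big: %d\n", fill_blocks(blocks, 1, sec, sizeof(sec)));
	return 0;
}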
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 9ad3f4fe5894..429e00bb02e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1490,14 +1490,13 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int max_irqs, num_irqs, i, ret, nr_online_cpus;
+ int max_irqs, num_irqs, i, ret;
u16 pci_cmd;
if (!trans->cfg->mq_rx_supported)
goto enable_msi;
- nr_online_cpus = num_online_cpus();
- max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
+ max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
for (i = 0; i < max_irqs; i++)
trans_pcie->msix_entries[i].entry = i;
@@ -1523,16 +1522,17 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
* Two interrupts less: non rx causes shared with FBQ and RSS.
* More than two interrupts: we will use fewer RSS queues.
*/
- if (num_irqs <= nr_online_cpus) {
+ if (num_irqs <= max_irqs - 2) {
trans_pcie->trans->num_rx_queues = num_irqs + 1;
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
IWL_SHARED_IRQ_FIRST_RSS;
- } else if (num_irqs == nr_online_cpus + 1) {
+ } else if (num_irqs == max_irqs - 1) {
trans_pcie->trans->num_rx_queues = num_irqs;
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
} else {
trans_pcie->trans->num_rx_queues = num_irqs - 1;
}
+ WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
trans_pcie->alloc_vecs = num_irqs;
trans_pcie->msix_enabled = true;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 181668a0be0d..ecf468d75728 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -378,6 +378,15 @@ static void nvme_put_ns(struct nvme_ns *ns)
kref_put(&ns->kref, nvme_free_ns);
}
+static inline void nvme_clear_nvme_request(struct request *req)
+{
+ if (!(req->rq_flags & RQF_DONTPREP)) {
+ nvme_req(req)->retries = 0;
+ nvme_req(req)->flags = 0;
+ req->rq_flags |= RQF_DONTPREP;
+ }
+}
+
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, unsigned int flags, int qid)
{
@@ -394,6 +403,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
return req;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
+ nvme_clear_nvme_request(req);
nvme_req(req)->cmd = cmd;
return req;
@@ -610,11 +620,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
{
blk_status_t ret = BLK_STS_OK;
- if (!(req->rq_flags & RQF_DONTPREP)) {
- nvme_req(req)->retries = 0;
- nvme_req(req)->flags = 0;
- req->rq_flags |= RQF_DONTPREP;
- }
+ nvme_clear_nvme_request(req);
switch (req_op(req)) {
case REQ_OP_DRV_IN:
@@ -743,6 +749,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
return PTR_ERR(req);
req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+ nvme_req(req)->flags |= NVME_REQ_USERCMD;
if (ubuffer && bufflen) {
ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
@@ -759,6 +766,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
ret = PTR_ERR(meta);
goto out_unmap;
}
+ req->cmd_flags |= REQ_INTEGRITY;
}
}
@@ -1104,7 +1112,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
}
if (ctrl->effects)
- effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
+ effects = le32_to_cpu(ctrl->effects->acs[opcode]);
else
effects = nvme_known_admin_effects(opcode);
@@ -2172,7 +2180,8 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
* Verify that the subsystem actually supports multiple
* controllers, else bail out.
*/
- if (nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
+ if (!(ctrl->opts && ctrl->opts->discovery_nqn) &&
+ nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
dev_err(ctrl->device,
"ignoring ctrl due to duplicate subnqn (%s).\n",
found->subnqn);
@@ -2793,6 +2802,7 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
list_for_each_entry(h, &subsys->nsheads, entry) {
if (nvme_ns_ids_valid(&new->ids) &&
+ !list_empty(&h->list) &&
nvme_ns_ids_equal(&new->ids, &h->ids))
return -EINVAL;
}
@@ -2814,7 +2824,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
goto out_free_head;
head->instance = ret;
INIT_LIST_HEAD(&head->list);
- init_srcu_struct(&head->srcu);
+ ret = init_srcu_struct(&head->srcu);
+ if (ret)
+ goto out_ida_remove;
head->subsys = ctrl->subsys;
head->ns_id = nsid;
kref_init(&head->ref);
@@ -2836,6 +2848,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
return head;
out_cleanup_srcu:
cleanup_srcu_struct(&head->srcu);
+out_ida_remove:
ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
kfree(head);
@@ -2974,31 +2987,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (nvme_init_ns_head(ns, nsid, id, &new))
goto out_free_id;
nvme_setup_streams_ns(ctrl, ns);
-
-#ifdef CONFIG_NVME_MULTIPATH
- /*
- * If multipathing is enabled we need to always use the subsystem
- * instance number for numbering our devices to avoid conflicts
- * between subsystems that have multiple controllers and thus use
- * the multipath-aware subsystem node and those that have a single
- * controller and use the controller node directly.
- */
- if (ns->head->disk) {
- sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
- ctrl->cntlid, ns->head->instance);
- flags = GENHD_FL_HIDDEN;
- } else {
- sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
- ns->head->instance);
- }
-#else
- /*
- * But without the multipath code enabled, multiple controller per
- * subsystems are visible as devices and thus we cannot use the
- * subsystem instance.
- */
- sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
-#endif
+ nvme_set_disk_name(disk_name, ns, ctrl, &flags);
if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
if (nvme_nvm_register(ns, disk_name, node)) {
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index a74b372cedc7..fa8f153d1d7e 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -536,6 +536,56 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
return NULL;
}
+/*
+ * If we are not in a state to send a command to the device, the default action
+ * is to busy it and retry it after the controller state is recovered. However,
+ * anything marked for failfast or nvme multipath is immediately failed.
+ *
+ * Note: commands used to initialize the controller will be marked for failfast.
+ * Note: nvme cli/ioctl commands are marked for failfast.
+ */
+blk_status_t nvmf_fail_nonready_command(struct request *rq)
+{
+ if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+ return BLK_STS_RESOURCE;
+ nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+ return BLK_STS_IOERR;
+}
+EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
+
+bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live)
+{
+ struct nvme_request *req = nvme_req(rq);
+
+ /*
+ * If we are in some state of setup or teardown only allow
+ * internally generated commands.
+ */
+ if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
+ return false;
+
+ /*
+ * Only allow commands on a live queue, except for the connect command,
+ * which is required to set the queue live in the appropriate states.
+ */
+ switch (ctrl->state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_CONNECTING:
+ if (req->cmd->common.opcode == nvme_fabrics_command &&
+ req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
+ return true;
+ break;
+ default:
+ break;
+ case NVME_CTRL_DEAD:
+ return false;
+ }
+
+ return queue_live;
+}
+EXPORT_SYMBOL_GPL(__nvmf_check_ready);
+
static const match_table_t opt_tokens = {
{ NVMF_OPT_TRANSPORT, "transport=%s" },
{ NVMF_OPT_TRADDR, "traddr=%s" },
@@ -589,6 +639,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
+ kfree(opts->transport);
opts->transport = p;
break;
case NVMF_OPT_NQN:
@@ -597,6 +648,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
+ kfree(opts->subsysnqn);
opts->subsysnqn = p;
nqnlen = strlen(opts->subsysnqn);
if (nqnlen >= NVMF_NQN_SIZE) {
@@ -608,10 +660,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->discovery_nqn =
!(strcmp(opts->subsysnqn,
NVME_DISC_SUBSYS_NAME));
- if (opts->discovery_nqn) {
- opts->kato = 0;
- opts->nr_io_queues = 0;
- }
break;
case NVMF_OPT_TRADDR:
p = match_strdup(args);
@@ -619,6 +667,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
+ kfree(opts->traddr);
opts->traddr = p;
break;
case NVMF_OPT_TRSVCID:
@@ -627,6 +676,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
+ kfree(opts->trsvcid);
opts->trsvcid = p;
break;
case NVMF_OPT_QUEUE_SIZE:
@@ -708,6 +758,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -EINVAL;
goto out;
}
+ nvmf_host_put(opts->host);
opts->host = nvmf_host_add(p);
kfree(p);
if (!opts->host) {
@@ -733,6 +784,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
+ kfree(opts->host_traddr);
opts->host_traddr = p;
break;
case NVMF_OPT_HOST_ID:
@@ -761,6 +813,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
}
+ if (opts->discovery_nqn) {
+ opts->kato = 0;
+ opts->nr_io_queues = 0;
+ opts->duplicate_connect = true;
+ }
if (ctrl_loss_tmo < 0)
opts->max_reconnects = -1;
else
@@ -893,16 +950,6 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
goto out_module_put;
}
- if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
- dev_warn(ctrl->device,
- "controller returned incorrect NQN: \"%s\".\n",
- ctrl->subsys->subnqn);
- module_put(ops->module);
- up_read(&nvmf_transports_rwsem);
- nvme_delete_ctrl_sync(ctrl);
- return ERR_PTR(-EINVAL);
- }
-
module_put(ops->module);
up_read(&nvmf_transports_rwsem);
return ctrl;
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index a3145d90c1d2..b2a8f6a6c056 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -139,7 +139,9 @@ static inline bool
nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts)
{
- if (strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
+ if (ctrl->state == NVME_CTRL_DELETING ||
+ ctrl->state == NVME_CTRL_DEAD ||
+ strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
memcmp(&opts->host->id, &ctrl->opts->host->id, sizeof(uuid_t)))
return false;
@@ -157,36 +159,17 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
+blk_status_t nvmf_fail_nonready_command(struct request *rq);
+bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live);
-static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
- struct request *rq)
+static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live)
{
- struct nvme_command *cmd = nvme_req(rq)->cmd;
-
- /*
- * We cannot accept any other command until the connect command has
- * completed, so only allow connect to pass.
- */
- if (!blk_rq_is_passthrough(rq) ||
- cmd->common.opcode != nvme_fabrics_command ||
- cmd->fabrics.fctype != nvme_fabrics_type_connect) {
- /*
- * Connecting state means transport disruption or initial
- * establishment, which can take a long time and even might
- * fail permanently, fail fast to give upper layers a chance
- * to failover.
- * Deleting state means that the ctrl will never accept commands
- * again, fail it permanently.
- */
- if (ctrl->state == NVME_CTRL_CONNECTING ||
- ctrl->state == NVME_CTRL_DELETING) {
- nvme_req(rq)->status = NVME_SC_ABORT_REQ;
- return BLK_STS_IOERR;
- }
- return BLK_STS_RESOURCE; /* try again later */
- }
-
- return BLK_STS_OK;
+ if (likely(ctrl->state == NVME_CTRL_LIVE ||
+ ctrl->state == NVME_CTRL_ADMIN_ONLY))
+ return true;
+ return __nvmf_check_ready(ctrl, rq, queue_live);
}
#endif /* _NVME_FABRICS_H */
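The refactor keeps the common case inline: a live controller answers true without a function call, and only the setup/teardown states drop into the out-of-line __nvmf_check_ready(). Each transport now passes its per-queue "live" bit instead of re-implementing its own is_ready helper. A userspace sketch of the fast-path/slow-path split, with simplified state names:

#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { CTRL_NEW, CTRL_LIVE, CTRL_CONNECTING, CTRL_DEAD };

/* Out-of-line slow path: consulted only when the controller is not live. */
static bool slow_check_ready(enum ctrl_state state, bool connect_cmd,
			     bool queue_live)
{
	switch (state) {
	case CTRL_NEW:
	case CTRL_CONNECTING:
		if (connect_cmd)
			return true;	/* connect must pass to go live */
		break;
	case CTRL_DEAD:
		return false;
	default:
		break;
	}
	return queue_live;
}

/* Inline fast path: live controllers never take the function call. */
static inline bool check_ready(enum ctrl_state state, bool connect_cmd,
			       bool queue_live)
{
	if (state == CTRL_LIVE)
		return true;
	return slow_check_ready(state, connect_cmd, queue_live);
}

int main(void)
{
	printf("live, io:            %d\n", check_ready(CTRL_LIVE, false, false));
	printf("connecting, connect: %d\n", check_ready(CTRL_CONNECTING, true, false));
	printf("connecting, io:      %d\n", check_ready(CTRL_CONNECTING, false, false));
	return 0;
}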
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index c032e20e283a..aad44525aac5 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -144,6 +144,7 @@ struct nvme_fc_ctrl {
struct nvme_fc_rport *rport;
u32 cnum;
+ bool ioq_live;
bool assoc_active;
u64 association_id;
@@ -1474,21 +1475,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
-static int
-nvme_fc_reinit_request(void *data, struct request *rq)
-{
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
- struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
-
- memset(cmdiu, 0, sizeof(*cmdiu));
- cmdiu->scsi_id = NVME_CMD_SCSI_ID;
- cmdiu->fc_id = NVME_CMD_FC_ID;
- cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
- memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));
-
- return 0;
-}
-
static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
struct nvme_fc_fcp_op *op)
@@ -1690,16 +1676,6 @@ done:
goto check_error;
}
- /*
- * Force failures of commands if we're killing the controller
- * or have an error on a command used to create an new association
- */
- if (status &&
- (blk_queue_dying(rq->q) ||
- ctrl->ctrl.state == NVME_CTRL_NEW ||
- ctrl->ctrl.state == NVME_CTRL_CONNECTING))
- status |= cpu_to_le16(NVME_SC_DNR << 1);
-
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
nvme_end_request(rq, status, result);
@@ -1907,6 +1883,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
*/
queue->connection_id = 0;
+ atomic_set(&queue->csn, 1);
}
static void
@@ -2287,14 +2264,6 @@ busy:
return BLK_STS_RESOURCE;
}
-static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
- struct request *rq)
-{
- if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
- return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
- return BLK_STS_OK;
-}
-
static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
@@ -2307,12 +2276,13 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
struct nvme_command *sqe = &cmdiu->sqe;
enum nvmefc_fcp_datadir io_dir;
+ bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
u32 data_len;
blk_status_t ret;
- ret = nvme_fc_is_ready(queue, rq);
- if (unlikely(ret))
- return ret;
+ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
+ !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+ return nvmf_fail_nonready_command(rq);
ret = nvme_setup_cmd(ns, rq, sqe);
if (ret)
@@ -2492,6 +2462,8 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_delete_hw_queues;
+ ctrl->ioq_live = true;
+
return 0;
out_delete_hw_queues:
@@ -2509,7 +2481,7 @@ out_free_tag_set:
}
static int
-nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
+nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
{
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
unsigned int nr_io_queues;
@@ -2529,12 +2501,6 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
if (ctrl->ctrl.queue_count == 1)
return 0;
- nvme_fc_init_io_queues(ctrl);
-
- ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
- if (ret)
- goto out_free_io_queues;
-
ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
goto out_free_io_queues;
@@ -2632,8 +2598,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
* Create the admin queue
*/
- nvme_fc_init_queue(ctrl, 0);
-
ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
NVME_AQ_DEPTH);
if (ret)
@@ -2644,8 +2608,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_delete_hw_queue;
- if (ctrl->ctrl.state != NVME_CTRL_NEW)
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
ret = nvmf_connect_admin_queue(&ctrl->ctrl);
if (ret)
@@ -2718,10 +2681,10 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
*/
if (ctrl->ctrl.queue_count > 1) {
- if (ctrl->ctrl.state == NVME_CTRL_NEW)
+ if (!ctrl->ioq_live)
ret = nvme_fc_create_io_queues(ctrl);
else
- ret = nvme_fc_reinit_io_queues(ctrl);
+ ret = nvme_fc_recreate_io_queues(ctrl);
if (ret)
goto out_term_aen_ops;
}
@@ -2805,8 +2768,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
* use blk_mq_tagset_busy_itr() and the transport routine to
* terminate the exchanges.
*/
- if (ctrl->ctrl.state != NVME_CTRL_NEW)
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
@@ -2947,7 +2909,6 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.submit_async_event = nvme_fc_submit_async_event,
.delete_ctrl = nvme_fc_delete_ctrl,
.get_address = nvmf_get_address,
- .reinit_request = nvme_fc_reinit_request,
};
static void
@@ -2964,7 +2925,7 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
nvme_fc_reconnect_or_delete(ctrl, ret);
else
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: controller reconnect complete\n",
+ "NVME-FC{%d}: controller connect complete\n",
ctrl->cnum);
}
@@ -3012,7 +2973,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
{
struct nvme_fc_ctrl *ctrl;
unsigned long flags;
- int ret, idx, retry;
+ int ret, idx;
if (!(rport->remoteport.port_role &
(FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
@@ -3039,11 +3000,13 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
}
ctrl->ctrl.opts = opts;
+ ctrl->ctrl.nr_reconnects = 0;
INIT_LIST_HEAD(&ctrl->ctrl_list);
ctrl->lport = lport;
ctrl->rport = rport;
ctrl->dev = lport->dev;
ctrl->cnum = idx;
+ ctrl->ioq_live = false;
ctrl->assoc_active = false;
init_waitqueue_head(&ctrl->ioabort_wait);
@@ -3062,6 +3025,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->ctrl.sqsize = opts->queue_size - 1;
ctrl->ctrl.kato = opts->kato;
+ ctrl->ctrl.cntlid = 0xffff;
ret = -ENOMEM;
ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
@@ -3069,6 +3033,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
if (!ctrl->queues)
goto out_free_ida;
+ nvme_fc_init_queue(ctrl, 0);
+
memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
@@ -3111,62 +3077,24 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
spin_unlock_irqrestore(&rport->lock, flags);
- /*
- * It's possible that transactions used to create the association
- * may fail. Examples: CreateAssociation LS or CreateIOConnection
- * LS gets dropped/corrupted/fails; or a frame gets dropped or a
- * command times out for one of the actions to init the controller
- * (Connect, Get/Set_Property, Set_Features, etc). Many of these
- * transport errors (frame drop, LS failure) inherently must kill
- * the association. The transport is coded so that any command used
- * to create the association (prior to a LIVE state transition
- * while NEW or CONNECTING) will fail if it completes in error or
- * times out.
- *
- * As such: as the connect request was mostly likely due to a
- * udev event that discovered the remote port, meaning there is
- * not an admin or script there to restart if the connect
- * request fails, retry the initial connection creation up to
- * three times before giving up and declaring failure.
- */
- for (retry = 0; retry < 3; retry++) {
- ret = nvme_fc_create_association(ctrl);
- if (!ret)
- break;
- }
-
- if (ret) {
- nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
- cancel_work_sync(&ctrl->ctrl.reset_work);
- cancel_delayed_work_sync(&ctrl->connect_work);
-
- /* couldn't schedule retry - fail out */
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
+ !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
dev_err(ctrl->ctrl.device,
- "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
-
- ctrl->ctrl.opts = NULL;
+ "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
+ goto fail_ctrl;
+ }
- /* initiate nvme ctrl ref counting teardown */
- nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_get_ctrl(&ctrl->ctrl);
- /* Remove core ctrl ref. */
+ if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
nvme_put_ctrl(&ctrl->ctrl);
-
- /* as we're past the point where we transition to the ref
- * counting teardown path, if we return a bad pointer here,
- * the calling routine, thinking it's prior to the
- * transition, will do an rport put. Since the teardown
- * path also does a rport put, we do an extra get here to
- * so proper order/teardown happens.
- */
- nvme_fc_rport_get(rport);
-
- if (ret > 0)
- ret = -EIO;
- return ERR_PTR(ret);
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: failed to schedule initial connect\n",
+ ctrl->cnum);
+ goto fail_ctrl;
}
- nvme_get_ctrl(&ctrl->ctrl);
+ flush_delayed_work(&ctrl->connect_work);
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
@@ -3174,6 +3102,30 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
return &ctrl->ctrl;
+fail_ctrl:
+ nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+ cancel_work_sync(&ctrl->ctrl.reset_work);
+ cancel_delayed_work_sync(&ctrl->connect_work);
+
+ ctrl->ctrl.opts = NULL;
+
+ /* initiate nvme ctrl ref counting teardown */
+ nvme_uninit_ctrl(&ctrl->ctrl);
+
+ /* Remove core ctrl ref. */
+ nvme_put_ctrl(&ctrl->ctrl);
+
+ /* as we're past the point where we transition to the ref
+ * counting teardown path, if we return a bad pointer here,
+ * the calling routine, thinking it's prior to the
+ * transition, will do an rport put. Since the teardown
+ * path also does a rport put, we do an extra get here so
+ * that proper order/teardown happens.
+ */
+ nvme_fc_rport_get(rport);
+
+ return ERR_PTR(-EIO);
+
out_cleanup_admin_q:
blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_admin_tag_set:
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 0fb560bbadd7..43821caf226e 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -19,6 +19,28 @@ module_param(multipath, bool, 0644);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
+/*
+ * If multipathing is enabled we need to always use the subsystem instance
+ * number for numbering our devices to avoid conflicts between subsystems that
+ * have multiple controllers and thus use the multipath-aware subsystem node
+ * and those that have a single controller and use the controller node
+ * directly.
+ */
+void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ struct nvme_ctrl *ctrl, int *flags)
+{
+ if (!multipath) {
+ sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+ } else if (ns->head->disk) {
+ sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+ ctrl->cntlid, ns->head->instance);
+ *flags = GENHD_FL_HIDDEN;
+ } else {
+ sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
+ ns->head->instance);
+ }
+}
+
void nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 6beafb84bb54..40695e589646 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -110,6 +110,7 @@ struct nvme_request {
enum {
NVME_REQ_CANCELLED = (1 << 0),
+ NVME_REQ_USERCMD = (1 << 1),
};
static inline struct nvme_request *nvme_req(struct request *req)
@@ -410,6 +411,8 @@ extern const struct attribute_group nvme_ns_id_attr_group;
extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
+void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
@@ -435,6 +438,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
}
#else
+/*
+ * Without the multipath code enabled, multiple controllers per subsystem are
+ * visible as devices and thus we cannot use the subsystem instance.
+ */
+static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ struct nvme_ctrl *ctrl, int *flags)
+{
+ sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+}
+
static inline void nvme_failover_req(struct request *req)
{
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 4ce578b83262..90b57cff72fe 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1594,17 +1594,6 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
return BLK_EH_HANDLED;
}
-/*
- * We cannot accept any other command until the Connect command has completed.
- */
-static inline blk_status_t
-nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
-{
- if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
- return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
- return BLK_STS_OK;
-}
-
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1615,14 +1604,14 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_rdma_qe *sqe = &req->sqe;
struct nvme_command *c = sqe->data;
struct ib_device *dev;
+ bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
blk_status_t ret;
int err;
WARN_ON_ONCE(rq->tag < 0);
- ret = nvme_rdma_is_ready(queue, rq);
- if (unlikely(ret))
- return ret;
+ if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+ return nvmf_fail_nonready_command(rq);
dev = queue->device->dev;
ib_dma_sync_single_for_cpu(dev, sqe->dma,
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 98a8bb3a00de..b4f99fac2ee1 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -31,7 +31,7 @@
/* *************************** Data Structures/Defines ****************** */
-#define NVMET_LS_CTX_COUNT 4
+#define NVMET_LS_CTX_COUNT 256
/* for this implementation, assume small single frame rqst/rsp */
#define NVME_FC_MAX_LS_BUFFER_SIZE 2048
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 861d1509b22b..04141fd94fa9 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -149,14 +149,6 @@ nvme_loop_timeout(struct request *rq, bool reserved)
return BLK_EH_HANDLED;
}
-static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
- struct request *rq)
-{
- if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
- return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
- return BLK_STS_OK;
-}
-
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -164,11 +156,11 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_loop_queue *queue = hctx->driver_data;
struct request *req = bd->rq;
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+ bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
blk_status_t ret;
- ret = nvme_loop_is_ready(queue, req);
- if (unlikely(ret))
- return ret;
+ if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
+ return nvmf_fail_nonready_command(req);
ret = nvme_setup_cmd(ns, req, &iod->cmd);
if (ret)
@@ -481,6 +473,12 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
nvme_stop_ctrl(&ctrl->ctrl);
nvme_loop_shutdown_ctrl(ctrl);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ /* state change failure should never happen */
+ WARN_ON_ONCE(1);
+ return;
+ }
+
ret = nvme_loop_configure_admin_queue(ctrl);
if (ret)
goto out_disable;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 35bfb31849b6..59b7cfa54ccd 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1159,11 +1159,14 @@ static int pci_pm_runtime_suspend(struct device *dev)
int error;
/*
- * If pci_dev->driver is not set (unbound), the device should
- * always remain in D0 regardless of the runtime PM status
+ * If pci_dev->driver is not set (unbound), we leave the device in D0,
+ * but it may go to D3cold when the bridge above it runtime suspends.
+ * Save its config space in case that happens.
*/
- if (!pci_dev->driver)
+ if (!pci_dev->driver) {
+ pci_save_state(pci_dev);
return 0;
+ }
if (!pm || !pm->runtime_suspend)
return -ENOSYS;
@@ -1211,16 +1214,18 @@ static int pci_pm_runtime_resume(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
/*
- * If pci_dev->driver is not set (unbound), the device should
- * always remain in D0 regardless of the runtime PM status
+ * Restoring config space is necessary even if the device is not bound
+ * to a driver because although we left it in D0, it may have gone to
+ * D3cold when the bridge above it runtime suspended.
*/
+ pci_restore_standard_config(pci_dev);
+
if (!pci_dev->driver)
return 0;
if (!pm || !pm->runtime_resume)
return -ENOSYS;
- pci_restore_standard_config(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
__pci_enable_wake(pci_dev, PCI_D0, true, false);
pci_fixup_device(pci_fixup_resume, pci_dev);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ddbeca1fbc17..298a0faa98f3 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3897,6 +3897,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
+ quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
quirk_dma_func1_alias);
@@ -4853,6 +4856,10 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)
pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index b7dc140a0353..f13f533dea92 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1002,10 +1002,10 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
- .ident = "Lenovo Legion Y520-15IKBN",
+ .ident = "Lenovo Legion Y520-15IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKBN"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKB"),
},
},
{
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 09d677d5d3f0..adde2482618a 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -31,6 +31,7 @@ static void of_get_regulation_constraints(struct device_node *np,
struct regulation_constraints *constraints = &(*init_data)->constraints;
struct regulator_state *suspend_state;
struct device_node *suspend_np;
+ unsigned int mode;
int ret, i;
u32 pval;
@@ -105,11 +106,11 @@ static void of_get_regulation_constraints(struct device_node *np,
if (!of_property_read_u32(np, "regulator-initial-mode", &pval)) {
if (desc && desc->of_map_mode) {
- ret = desc->of_map_mode(pval);
- if (ret == -EINVAL)
+ mode = desc->of_map_mode(pval);
+ if (mode == REGULATOR_MODE_INVALID)
pr_err("%s: invalid mode %u\n", np->name, pval);
else
- constraints->initial_mode = ret;
+ constraints->initial_mode = mode;
} else {
pr_warn("%s: mapping for mode %d not defined\n",
np->name, pval);
@@ -144,12 +145,12 @@ static void of_get_regulation_constraints(struct device_node *np,
if (!of_property_read_u32(suspend_np, "regulator-mode",
&pval)) {
if (desc && desc->of_map_mode) {
- ret = desc->of_map_mode(pval);
- if (ret == -EINVAL)
+ mode = desc->of_map_mode(pval);
+ if (mode == REGULATOR_MODE_INVALID)
pr_err("%s: invalid mode %u\n",
np->name, pval);
else
- suspend_state->mode = ret;
+ suspend_state->mode = mode;
} else {
pr_warn("%s: mapping for mode %d not defined\n",
np->name, pval);
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 6c9ec84121bd..ae94fcedeebc 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -274,7 +274,7 @@ static inline unsigned int twl4030reg_map_mode(unsigned int mode)
case RES_STATE_SLEEP:
return REGULATOR_MODE_STANDBY;
default:
- return -EINVAL;
+ return REGULATOR_MODE_INVALID;
}
}
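of_map_mode() returns unsigned int, so returning a negative errno through it is fragile: the value wraps to a large positive "mode" that can be stored as if valid. The fix introduces REGULATOR_MODE_INVALID as an in-band sentinel and has of_regulator.c test for it instead of -EINVAL. A small demonstration of why a negative errno does not survive an unsigned return; the mode values below mirror the kernel's consumer.h and are used here as assumptions:

#include <stdio.h>

#define REGULATOR_MODE_INVALID 0x0
#define REGULATOR_MODE_NORMAL  0x2
#define REGULATOR_MODE_STANDBY 0x8

static unsigned int map_mode(unsigned int hw_state)
{
	switch (hw_state) {
	case 0: return REGULATOR_MODE_NORMAL;
	case 1: return REGULATOR_MODE_STANDBY;
	default: return REGULATOR_MODE_INVALID;	/* not -EINVAL */
	}
}

int main(void)
{
	unsigned int bad = (unsigned int)-22;	/* what -EINVAL becomes */

	printf("-EINVAL through unsigned: %u\n", bad);
	if (map_mode(42) == REGULATOR_MODE_INVALID)
		printf("invalid mode detected via sentinel\n");
	return 0;
}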
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index abbf5d6dcebb..939b5b5e97ef 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -526,8 +526,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
queue == card->qdio.no_in_queues - 1;
}
-
-static int qeth_issue_next_read(struct qeth_card *card)
+static int __qeth_issue_next_read(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -558,6 +557,17 @@ static int qeth_issue_next_read(struct qeth_card *card)
return rc;
}
+static int qeth_issue_next_read(struct qeth_card *card)
+{
+ int ret;
+
+ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+ ret = __qeth_issue_next_read(card);
+ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+
+ return ret;
+}
+
static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
{
struct qeth_reply *reply;
@@ -961,7 +971,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
spin_lock_irqsave(&card->thread_mask_lock, flags);
card->thread_running_mask &= ~thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
- wake_up(&card->wait_q);
+ wake_up_all(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
@@ -1184,7 +1194,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
return;
if (channel == &card->read &&
channel->state == CH_STATE_UP)
- qeth_issue_next_read(card);
+ __qeth_issue_next_read(card);
iob = channel->iob;
index = channel->buf_no;
@@ -2088,25 +2098,26 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
reply->callback = reply_cb;
reply->param = reply_param;
- if (card->state == CARD_STATE_DOWN)
- reply->seqno = QETH_IDX_COMMAND_SEQNO;
- else
- reply->seqno = card->seqno.ipa++;
+
init_waitqueue_head(&reply->wait_q);
- spin_lock_irqsave(&card->lock, flags);
- list_add_tail(&reply->list, &card->cmd_waiter_list);
- spin_unlock_irqrestore(&card->lock, flags);
QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
- qeth_prepare_control_data(card, len, iob);
if (IS_IPA(iob->data)) {
cmd = __ipa_cmd(iob);
+ cmd->hdr.seqno = card->seqno.ipa++;
+ reply->seqno = cmd->hdr.seqno;
event_timeout = QETH_IPA_TIMEOUT;
} else {
+ reply->seqno = QETH_IDX_COMMAND_SEQNO;
event_timeout = QETH_TIMEOUT;
}
+ qeth_prepare_control_data(card, len, iob);
+
+ spin_lock_irqsave(&card->lock, flags);
+ list_add_tail(&reply->list, &card->cmd_waiter_list);
+ spin_unlock_irqrestore(&card->lock, flags);
timeout = jiffies + event_timeout;
@@ -2897,7 +2908,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
cmd->hdr.command = command;
cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
- cmd->hdr.seqno = card->seqno.ipa;
+ /* cmd->hdr.seqno is set by qeth_send_control_data() */
cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
if (card->options.layer2)
@@ -3862,10 +3873,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
int qeth_get_elements_no(struct qeth_card *card,
struct sk_buff *skb, int extra_elems, int data_offset)
{
- int elements = qeth_get_elements_for_range(
- (addr_t)skb->data + data_offset,
- (addr_t)skb->data + skb_headlen(skb)) +
- qeth_get_elements_for_frags(skb);
+ addr_t end = (addr_t)skb->data + skb_headlen(skb);
+ int elements = qeth_get_elements_for_frags(skb);
+ addr_t start = (addr_t)skb->data + data_offset;
+
+ if (start != end)
+ elements += qeth_get_elements_for_range(start, end);
if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
@@ -5059,8 +5072,6 @@ static void qeth_core_free_card(struct qeth_card *card)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_clean_channel(&card->read);
qeth_clean_channel(&card->write);
- if (card->dev)
- free_netdev(card->dev);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
kfree(card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 4a11539fc23c..3aeea0d77777 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -969,8 +969,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_l2_set_offline(cgdev);
if (card->dev) {
- netif_napi_del(&card->napi);
unregister_netdev(card->dev);
+ free_netdev(card->dev);
card->dev = NULL;
}
return;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index b2c2f4647936..e936980350cf 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2625,11 +2625,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,
static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
struct sk_buff *skb, int extra_elems)
{
- addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
- int elements = qeth_get_elements_for_range(
- tcpdptr,
- (addr_t)skb->data + skb_headlen(skb)) +
- qeth_get_elements_for_frags(skb);
+ addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
+ addr_t end = (addr_t)skb->data + skb_headlen(skb);
+ int elements = qeth_get_elements_for_frags(skb);
+
+ if (start != end)
+ elements += qeth_get_elements_for_range(start, end);
if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2,
@@ -3055,8 +3056,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_set_offline(cgdev);
if (card->dev) {
- netif_napi_del(&card->napi);
unregister_netdev(card->dev);
+ free_netdev(card->dev);
card->dev = NULL;
}
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 18eefc3f1abe..0c6065dba48a 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2414,39 +2414,21 @@ struct cgr_comp {
struct completion completion;
};
-static int qman_delete_cgr_thread(void *p)
+static void qman_delete_cgr_smp_call(void *p)
{
- struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
- int ret;
-
- ret = qman_delete_cgr(cgr_comp->cgr);
- complete(&cgr_comp->completion);
-
- return ret;
+ qman_delete_cgr((struct qman_cgr *)p);
}
void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
- struct task_struct *thread;
- struct cgr_comp cgr_comp;
-
preempt_disable();
if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
- init_completion(&cgr_comp.completion);
- cgr_comp.cgr = cgr;
- thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
- "cgr_del");
-
- if (IS_ERR(thread))
- goto out;
-
- kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
- wake_up_process(thread);
- wait_for_completion(&cgr_comp.completion);
+ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+ qman_delete_cgr_smp_call, cgr, true);
preempt_enable();
return;
}
-out:
+
qman_delete_cgr(cgr);
preempt_enable();
}
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index e7e64913a748..e1d0b1be8d86 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1077,13 +1077,14 @@ static int omap8250_no_handle_irq(struct uart_port *port)
return 0;
}
+static const u8 omap4_habit = UART_ERRATA_CLOCK_DISABLE;
static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE;
static const u8 dra742_habit = UART_ERRATA_CLOCK_DISABLE;
static const struct of_device_id omap8250_dt_ids[] = {
{ .compatible = "ti,omap2-uart" },
{ .compatible = "ti,omap3-uart" },
- { .compatible = "ti,omap4-uart" },
+ { .compatible = "ti,omap4-uart", .data = &omap4_habit, },
{ .compatible = "ti,am3352-uart", .data = &am3352_habit, },
{ .compatible = "ti,am4372-uart", .data = &am3352_habit, },
{ .compatible = "ti,dra742-uart", .data = &dra742_habit, },
@@ -1320,6 +1321,19 @@ static int omap8250_soft_reset(struct device *dev)
int sysc;
int syss;
+ /*
+ * At least on omap4, unused uarts may not idle after reset without
+ * a basic scr dma configuration even with no dma in use. The
+ * module clkctrl status bits will be 1 instead of 3 blocking idle
+ * for the whole clockdomain. The softreset below will clear scr,
+ * and we restore it on resume so this is safe to do on all SoCs
+ * needing the omap8250_soft_reset() quirk. Do it in two writes, as
+ * recommended in the comment for omap8250_update_scr().
+ */
+ serial_out(up, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1);
+ serial_out(up, UART_OMAP_SCR,
+ OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL);
+
sysc = serial_in(up, UART_OMAP_SYSC);
/* softreset the UART */
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 3e4b717670d7..59cb62de236b 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -331,7 +331,7 @@ static int altera_uart_startup(struct uart_port *port)
/* Enable RX interrupts now */
pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
- writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+ altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
spin_unlock_irqrestore(&port->lock, flags);
@@ -347,7 +347,7 @@ static void altera_uart_shutdown(struct uart_port *port)
/* Disable all interrupts now */
pp->imr = 0;
- writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+ altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
spin_unlock_irqrestore(&port->lock, flags);
@@ -436,7 +436,7 @@ static void altera_uart_console_putc(struct uart_port *port, int c)
ALTERA_UART_STATUS_TRDY_MSK))
cpu_relax();
- writel(c, port->membase + ALTERA_UART_TXDATA_REG);
+ altera_uart_writel(port, c, ALTERA_UART_TXDATA_REG);
}
static void altera_uart_console_write(struct console *co, const char *s,
@@ -506,13 +506,13 @@ static int __init altera_uart_earlycon_setup(struct earlycon_device *dev,
return -ENODEV;
/* Enable RX interrupts now */
- writel(ALTERA_UART_CONTROL_RRDY_MSK,
- port->membase + ALTERA_UART_CONTROL_REG);
+ altera_uart_writel(port, ALTERA_UART_CONTROL_RRDY_MSK,
+ ALTERA_UART_CONTROL_REG);
if (dev->baud) {
unsigned int baudclk = port->uartclk / dev->baud;
- writel(baudclk, port->membase + ALTERA_UART_DIVISOR_REG);
+ altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
}
dev->con->write = altera_uart_earlycon_write;
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 5ac06fcaa9c6..fec48deb074c 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -596,6 +596,11 @@ static int arc_serial_probe(struct platform_device *pdev)
if (dev_id < 0)
dev_id = 0;
+ if (dev_id >= ARRAY_SIZE(arc_uart_ports)) {
+ dev_err(&pdev->dev, "serial%d out of range\n", dev_id);
+ return -EINVAL;
+ }
+
uart = &arc_uart_ports[dev_id];
port = &uart->port;
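
The same out-of-range guard recurs in the fsl_lpuart, imx, mxs-auart, samsung and sh-sci hunks below: an alias id taken from the device tree must be bounds-checked before it indexes a fixed-size port array. A runnable sketch of the pattern, with ports[] and probe() as hypothetical stand-ins:

    #include <stdio.h>
    #include <errno.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct uart_port { int line; };
    static struct uart_port ports[4];   /* stand-in for arc_uart_ports[] */

    /* Reject an alias/DT id before using it to index the static table,
     * mirroring the "serial%d out of range" checks added above. */
    static int probe(int dev_id)
    {
            if (dev_id < 0)
                    dev_id = 0;
            if ((unsigned int)dev_id >= ARRAY_SIZE(ports)) {
                    fprintf(stderr, "serial%d out of range\n", dev_id);
                    return -EINVAL;
            }
            ports[dev_id].line = dev_id;
            return 0;
    }

    int main(void)
    {
            printf("probe(2)  -> %d\n", probe(2));    /* ok */
            printf("probe(99) -> %d\n", probe(99));   /* -EINVAL, no OOB write */
            return 0;
    }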
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 15df1ba78095..50be54f5565f 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1987,6 +1987,10 @@ static int lpuart_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
return ret;
}
+ if (ret >= ARRAY_SIZE(lpuart_ports)) {
+ dev_err(&pdev->dev, "serial%d out of range\n", ret);
+ return -EINVAL;
+ }
sport->port.line = ret;
sport->lpuart32 = of_device_is_compatible(np, "fsl,ls1021a-lpuart");
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 54389f61efd8..cdcf3e1c5712 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2103,6 +2103,12 @@ static int serial_imx_probe(struct platform_device *pdev)
else if (ret < 0)
return ret;
+ if (sport->port.line >= ARRAY_SIZE(imx_ports)) {
+ dev_err(&pdev->dev, "serial%d out of range\n",
+ sport->port.line);
+ return -EINVAL;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index be94246b6fcc..673c8fd7e34f 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1667,6 +1667,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
s->port.line = pdev->id < 0 ? 0 : pdev->id;
else if (ret < 0)
return ret;
+ if (s->port.line >= ARRAY_SIZE(auart_port)) {
+ dev_err(&pdev->dev, "serial%d out of range\n", s->port.line);
+ return -EINVAL;
+ }
if (of_id) {
pdev->id_entry = of_id->data;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 8aca18c4cdea..57baa84ccf86 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -865,15 +865,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
dma->rx_conf.direction = DMA_DEV_TO_MEM;
dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH;
- dma->rx_conf.src_maxburst = 16;
+ dma->rx_conf.src_maxburst = 1;
dma->tx_conf.direction = DMA_MEM_TO_DEV;
dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH;
- if (dma_get_cache_alignment() >= 16)
- dma->tx_conf.dst_maxburst = 16;
- else
- dma->tx_conf.dst_maxburst = 1;
+ dma->tx_conf.dst_maxburst = 1;
dma->rx_chan = dma_request_chan(p->port.dev, "rx");
@@ -1821,6 +1818,10 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index);
+ if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) {
+ dev_err(&pdev->dev, "serial%d out of range\n", index);
+ return -EINVAL;
+ }
ourport = &s3c24xx_serial_ports[index];
ourport->drv_data = s3c24xx_get_driver_data(pdev);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index b079a258d18f..d20b6b1e1b10 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2650,8 +2650,8 @@ found:
dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i],
PTR_ERR(clk));
else
- dev_dbg(dev, "clk %s is %pC rate %pCr\n", clk_names[i],
- clk, clk);
+ dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i],
+ clk, clk_get_rate(clk));
sci_port->clks[i] = IS_ERR(clk) ? NULL : clk;
}
return 0;
@@ -3056,6 +3056,10 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
dev_err(&pdev->dev, "failed to get alias id (%d)\n", id);
return NULL;
}
+ if (id >= ARRAY_SIZE(sci_ports)) {
+ dev_err(&pdev->dev, "serial%d out of range\n", id);
+ return NULL;
+ }
sp = &sci_ports[id];
*dev_id = id;
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 422e978e68e1..9acf37642b2c 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1114,7 +1114,7 @@ static struct uart_port *cdns_uart_get_port(int id)
struct uart_port *port;
/* Try the given port id; if that fails, use the default method */
- if (cdns_uart_port[id].mapbase != 0) {
+ if (id < CDNS_UART_NR_PORTS && cdns_uart_port[id].mapbase != 0) {
/* Find the next unused port */
for (id = 0; id < CDNS_UART_NR_PORTS; id++)
if (cdns_uart_port[id].mapbase == 0)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 83ad2ac0cbea..bcffef49ca6c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1095,6 +1095,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
}
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
+ vhost_dev_stop(&n->dev);
vhost_dev_reset_owner(&n->dev, umem);
vhost_net_vq_reset(n);
done:
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e4613a3c362d..c6a8075714f6 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2396,6 +2396,9 @@ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
if (!node)
return NULL;
+
+ /* Make sure all padding within the structure is initialized. */
+ memset(&node->msg, 0, sizeof node->msg);
node->vq = vq;
node->msg.type = type;
return node;
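
The memset above matters because kmalloc() does not zero memory, and assigning only the named fields leaves compiler-inserted padding uninitialized; if the whole struct is later copied to userspace, those bytes leak kernel heap contents. A userspace sketch of the same hazard (struct msg is hypothetical):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct msg {
            char type;      /* 3 bytes of padding typically follow */
            int payload;
    };

    int main(void)
    {
            struct msg *m = malloc(sizeof(*m));

            if (!m)
                    return 1;
            /* Assigning the named fields leaves the padding holding
             * whatever the heap contained; memset first so copying the
             * whole struct out cannot leak it. */
            memset(m, 0, sizeof(*m));
            m->type = 1;
            m->payload = 42;

            for (size_t i = 0; i < sizeof(*m); i++)
                    printf("%02x ", ((unsigned char *)m)[i]);
            putchar('\n');
            free(m);
            return 0;
    }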
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index a4621757a47f..dacb5919970c 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -113,6 +113,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
if (IS_ERR(mdev->clk))
return PTR_ERR(mdev->clk);
+ err = clk_prepare_enable(mdev->clk);
+ if (err)
+ return err;
+
clkrate = clk_get_rate(mdev->clk);
if (clkrate < 10000000)
dev_warn(&pdev->dev,
@@ -126,12 +130,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdev->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mdev->regs))
- return PTR_ERR(mdev->regs);
-
- err = clk_prepare_enable(mdev->clk);
- if (err)
- return err;
+ if (IS_ERR(mdev->regs)) {
+ err = PTR_ERR(mdev->regs);
+ goto out_disable_clk;
+ }
/* Software reset 1-Wire module */
writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET);
@@ -147,8 +149,12 @@ static int mxc_w1_probe(struct platform_device *pdev)
err = w1_add_master_device(&mdev->bus_master);
if (err)
- clk_disable_unprepare(mdev->clk);
+ goto out_disable_clk;
+ return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(mdev->clk);
return err;
}
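
The mxc_w1 hunk reorders probe() so the clock is enabled first and every later failure unwinds through a single label that disables it again. A minimal userspace sketch of that goto-unwind shape; all functions here are hypothetical stand-ins:

    #include <stdio.h>

    static int clk_prepare_enable(void) { puts("clk on"); return 0; }
    static void clk_disable_unprepare(void) { puts("clk off"); }
    static int ioremap_fails(void) { return -12; /* stands in for -ENOMEM */ }

    /* Once the clock is enabled before the first failure point, every
     * later error must funnel through one label that disables it. */
    static int probe(void)
    {
            int err = clk_prepare_enable();

            if (err)
                    return err;

            err = ioremap_fails();
            if (err)
                    goto out_disable_clk;

            return 0;

    out_disable_clk:
            clk_disable_unprepare();
            return err;
    }

    int main(void)
    {
            printf("probe() -> %d\n", probe());
            return 0;
    }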
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index f4718098ac31..1bf786a3a4d5 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -384,8 +384,13 @@ static Node *create_entry(const char __user *buffer, size_t count)
s = strchr(p, del);
if (!s)
goto einval;
- *s++ = '\0';
- e->offset = simple_strtoul(p, &p, 10);
+ *s = '\0';
+ if (p != s) {
+ int r = kstrtoint(p, 10, &e->offset);
+ if (r != 0 || e->offset < 0)
+ goto einval;
+ }
+ p = s;
if (*p++)
goto einval;
pr_debug("register: offset: %#x\n", e->offset);
@@ -425,7 +430,8 @@ static Node *create_entry(const char __user *buffer, size_t count)
if (e->mask &&
string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size)
goto einval;
- if (e->size + e->offset > BINPRM_BUF_SIZE)
+ if (e->size > BINPRM_BUF_SIZE ||
+ BINPRM_BUF_SIZE - e->size < e->offset)
goto einval;
pr_debug("register: magic/mask length: %i\n", e->size);
if (USE_DEBUG) {
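
The second binfmt_misc hunk replaces "e->size + e->offset > BINPRM_BUF_SIZE" with a form that cannot overflow. A runnable sketch of why (the kernel fields are ints, where the overflow would even be undefined behaviour; size_t is used here so the wraparound is well-defined and observable):

    #include <stdio.h>
    #include <stdint.h>

    #define BINPRM_BUF_SIZE 128

    /* Old form: the sum can wrap, letting huge inputs pass the check. */
    static int old_check(size_t size, size_t offset)
    {
            return size + offset > BINPRM_BUF_SIZE;
    }

    /* New form: compare each term against the budget, no overflow. */
    static int new_check(size_t size, size_t offset)
    {
            return size > BINPRM_BUF_SIZE ||
                   BINPRM_BUF_SIZE - size < offset;
    }

    int main(void)
    {
            size_t size = 4, offset = SIZE_MAX - 1;

            printf("old rejects: %d\n", old_check(size, offset));   /* 0: sum wrapped */
            printf("new rejects: %d\n", new_check(size, offset));   /* 1 */
            return 0;
    }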
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 6636b602064a..0caae6a719ce 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1123,6 +1123,7 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
if (IS_ERR(realdn)) {
pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
PTR_ERR(realdn), dn, in, ceph_vinop(in));
+ dput(dn);
dn = realdn; /* note realdn contains the error */
goto out;
} else if (realdn) {
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index da3cb4002364..e862cf923f46 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -178,6 +178,18 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
* reconnect the same SMB session
*/
mutex_lock(&ses->session_mutex);
+
+ /*
+ * Recheck after acquiring the mutex. If another thread is negotiating
+ * and the server never sends an answer, the socket will be closed
+ * and tcpStatus set to reconnect.
+ */
+ if (server->tcpStatus == CifsNeedReconnect) {
+ rc = -EHOSTDOWN;
+ mutex_unlock(&ses->session_mutex);
+ goto out;
+ }
+
rc = cifs_negotiate_protocol(0, ses);
if (rc == 0 && ses->need_reconnect)
rc = cifs_setup_session(0, ses, nls_codepage);
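
The added block is a classic recheck-after-lock: the status that justified taking the mutex may have changed while this thread slept on it, so it is re-tested before negotiating. A userspace sketch of the shape (names hypothetical, -112 standing in for -EHOSTDOWN):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int tcp_status = 1;   /* 1 = NeedReconnect, 0 = Good */

    /* While we slept on the mutex another thread may have observed the
     * server go away; recheck the shared status before doing work that
     * only makes sense in the old state. */
    static int reconnect(void)
    {
            pthread_mutex_lock(&lock);
            if (tcp_status == 1) {
                    pthread_mutex_unlock(&lock);
                    return -112;   /* stands in for -EHOSTDOWN */
            }
            /* ... negotiate protocol ... */
            pthread_mutex_unlock(&lock);
            return 0;
    }

    int main(void)
    {
            printf("reconnect() -> %d\n", reconnect());
            return 0;
    }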
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 7410b6c0a123..c4dae9999a10 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -514,7 +514,8 @@ server_unresponsive(struct TCP_Server_Info *server)
* 65s kernel_recvmsg times out, and we see that we haven't gotten
* a response in >60s.
*/
- if (server->tcpStatus == CifsGood &&
+ if ((server->tcpStatus == CifsGood ||
+ server->tcpStatus == CifsNeedNegotiate) &&
time_after(jiffies, server->lstrp + 2 * server->echo_interval)) {
cifs_dbg(VFS, "Server %s has not responded in %lu seconds. Reconnecting...\n",
server->hostname, (2 * server->echo_interval) / HZ);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 72595f9946e2..6a3b030fad09 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -238,6 +238,18 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
* the same SMB session
*/
mutex_lock(&tcon->ses->session_mutex);
+
+ /*
+ * Recheck after acquiring the mutex. If another thread is negotiating
+ * and the server never sends an answer, the socket will be closed
+ * and tcpStatus set to reconnect.
+ */
+ if (server->tcpStatus == CifsNeedReconnect) {
+ rc = -EHOSTDOWN;
+ mutex_unlock(&tcon->ses->session_mutex);
+ goto out;
+ }
+
rc = cifs_negotiate_protocol(0, tcon->ses);
if (!rc && tcon->ses->need_reconnect)
rc = cifs_setup_session(0, tcon->ses, nls_codepage);
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 52479f180ea1..7bedde0605a4 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -415,11 +415,8 @@ validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
return htonl(NFS4ERR_SEQ_FALSE_RETRY);
}
- /* Wraparound */
- if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) {
- if (args->csa_sequenceid == 1)
- return htonl(NFS4_OK);
- } else if (likely(args->csa_sequenceid == slot->seq_nr + 1))
+ /* Note: wraparound relies on seq_nr being of type u32 */
+ if (likely(args->csa_sequenceid == slot->seq_nr + 1))
return htonl(NFS4_OK);
/* Misordered request */
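
The simplification above leans on well-defined unsigned wraparound: with seq_nr a u32, seq_nr + 1 rolls over to 0 at the top of the range, so a single comparison needs no wraparound branch (note the accepted successor of 0xFFFFFFFF thereby becomes 0 rather than the 1 the removed branch allowed). A two-line demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t seq_nr = UINT32_MAX;

            /* Unsigned overflow is well-defined: UINT32_MAX + 1 == 0, so
             * "csa_sequenceid == seq_nr + 1" covers the wrap case too. */
            printf("seq_nr + 1 = %u\n", (unsigned)(seq_nr + 1));   /* prints 0 */
            return 0;
    }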
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 8b31d9be11d4..29dfb0899d64 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -831,23 +831,9 @@ int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
is_local = 1;
- /*
- * VFS doesn't require the open mode to match a flock() lock's type.
- * NFS, however, may simulate flock() locking with posix locking which
- * requires the open mode to match the lock type.
- */
- switch (fl->fl_type) {
- case F_UNLCK:
+ /* We're simulating flock() locks using posix locks on the server */
+ if (fl->fl_type == F_UNLCK)
return do_unlk(filp, cmd, fl, is_local);
- case F_RDLCK:
- if (!(filp->f_mode & FMODE_READ))
- return -EBADF;
- break;
- case F_WRLCK:
- if (!(filp->f_mode & FMODE_WRITE))
- return -EBADF;
- }
-
return do_setlk(filp, cmd, fl, is_local);
}
EXPORT_SYMBOL_GPL(nfs_flock);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ca70ac2ad433..b98e86f61842 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -6491,6 +6491,20 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
return -ENOLCK;
+ /*
+ * Don't rely on the VFS having checked the file open mode,
+ * since it won't do this for flock() locks.
+ */
+ switch (request->fl_type) {
+ case F_RDLCK:
+ if (!(filp->f_mode & FMODE_READ))
+ return -EBADF;
+ break;
+ case F_WRLCK:
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+ }
+
status = nfs4_set_lock_state(state, request);
if (status != 0)
return status;
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index c330495c3115..ffc8bdd63a11 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -130,9 +130,20 @@ static int tid_fd_revalidate(struct dentry *dentry, unsigned int flags)
return 0;
}
+int procfs_drop_fd_dentries = 0;
+
+static int tid_fd_delete_dentry(const struct dentry *dentry)
+{
+ /* If the sysctl is set, always delete the dentry immediately */
+ if (procfs_drop_fd_dentries)
+ return 1;
+
+ return pid_delete_dentry(dentry);
+}
+
static const struct dentry_operations tid_fd_dentry_operations = {
.d_revalidate = tid_fd_revalidate,
- .d_delete = pid_delete_dentry,
+ .d_delete = tid_fd_delete_dentry,
};
static int proc_fd_link(struct dentry *dentry, struct path *path)
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ec47101cb1bf..2752e2be30de 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -623,13 +623,13 @@ struct sock_cgroup_data {
* updaters and return part of the previous pointer as the prioidx or
* classid. Such races are short-lived and the result isn't critical.
*/
-static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
+static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
/* fallback to 1 which is always the ID of the root cgroup */
return (skcd->is_data & 1) ? skcd->prioidx : 1;
}
-static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
+static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
/* fallback to 0 which is the unconfigured default classid */
return (skcd->is_data & 1) ? skcd->classid : 0;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 5a5547fdeda0..c2f50355c453 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -851,7 +851,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
void phy_state_machine(struct work_struct *work);
-void phy_change(struct phy_device *phydev);
void phy_change_work(struct work_struct *work);
void phy_mac_interrupt(struct phy_device *phydev, int new_link);
void phy_start_machine(struct phy_device *phydev);
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 2d2bf592d9db..44c3687fff5a 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -88,4 +88,6 @@ struct ns_common;
int open_related_ns(struct ns_common *ns,
struct ns_common *(*get_ns)(struct ns_common *ns));
+extern int procfs_drop_fd_dentries;
+
#endif /* _LINUX_PROC_FS_H */
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index df176d7c2b87..25602afd4844 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -80,6 +80,7 @@ struct regmap;
* These modes can be OR'ed together to make up a mask of valid register modes.
*/
+#define REGULATOR_MODE_INVALID 0x0
#define REGULATOR_MODE_FAST 0x1
#define REGULATOR_MODE_NORMAL 0x2
#define REGULATOR_MODE_IDLE 0x4
diff --git a/include/net/arp.h b/include/net/arp.h
index 65619a2de6f4..40522874d7b8 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -19,6 +19,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
{
+ if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
+ key = INADDR_ANY;
+
return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
}
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index fe80bb48ab1f..c4d2e82d9881 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -217,6 +217,11 @@ int net_eq(const struct net *net1, const struct net *net2)
return net1 == net2;
}
+static inline int check_net(const struct net *net)
+{
+ return atomic_read(&net->count) != 0;
+}
+
void net_drop_ns(void *);
#else
@@ -241,6 +246,11 @@ int net_eq(const struct net *net1, const struct net *net2)
return 1;
}
+static inline int check_net(const struct net *net)
+{
+ return 1;
+}
+
#define net_drop_ns NULL
#endif
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 320edfaf5e48..165e5e08e137 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -712,6 +712,16 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
*to_free = skb;
}
+static inline void __qdisc_drop_all(struct sk_buff *skb,
+ struct sk_buff **to_free)
+{
+ if (skb->prev)
+ skb->prev->next = *to_free;
+ else
+ skb->next = *to_free;
+ *to_free = skb;
+}
+
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
struct qdisc_skb_head *qh,
struct sk_buff **to_free)
@@ -832,6 +842,15 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_DROP;
}
+static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ __qdisc_drop_all(skb, to_free);
+ qdisc_qstats_drop(sch);
+
+ return NET_XMIT_DROP;
+}
+
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
long it will take to send a packet given its size.
*/
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index fd24153e8a48..66a74c22114c 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -170,7 +170,7 @@ static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
* @tail : The new queue tail code word
* Return: The previous queue tail code word
*
- * xchg(lock, tail)
+ * xchg(lock, tail), which heads an address dependency
*
* p,*,* -> n,*,* ; prev = xchg(lock, node)
*/
@@ -496,6 +496,14 @@ queue:
tail = encode_tail(smp_processor_id(), idx);
node += idx;
+
+ /*
+ * Ensure that we increment the head node->count before initialising
+ * the actual node. If the compiler is kind enough to reorder these
+ * stores, then an IRQ could overwrite our assignments.
+ */
+ barrier();
+
node->locked = 0;
node->next = NULL;
pv_init_node(node);
@@ -525,16 +533,15 @@ queue:
*/
if (old & _Q_TAIL_MASK) {
prev = decode_tail(old);
+
/*
- * The above xchg_tail() is also a load of @lock which generates,
- * through decode_tail(), a pointer.
- *
- * The address dependency matches the RELEASE of xchg_tail()
- * such that the access to @prev must happen after.
+ * We must ensure that the stores to @node are observed before
+ * the write to prev->next. The address dependency from
+ * xchg_tail is not sufficient to ensure this because the read
+ * component of xchg_tail is unordered with respect to the
+ * initialisation of @node.
*/
- smp_read_barrier_depends();
-
- WRITE_ONCE(prev->next, node);
+ smp_store_release(&prev->next, node);
pv_wait_node(node, prev);
arch_mcs_spin_lock_contended(&node->locked);
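
The smp_store_release() pairs the pointer publication with the node initialisation that precedes it. A userspace C11 analogue of the idea (not the kernel primitive itself): a release store keeps the plain initialising stores ordered before the pointer becomes visible to an acquiring reader.

    #include <stdatomic.h>
    #include <stdio.h>

    struct node {
            int locked;
            struct node *next;
    };

    static _Atomic(struct node *) prev_next;

    /* Analogue of smp_store_release(&prev->next, node): the release
     * store orders the initialising stores to *n before the pointer
     * is visible to another thread doing an acquire load. */
    static void publish(struct node *n)
    {
            n->locked = 0;
            n->next = NULL;
            atomic_store_explicit(&prev_next, n, memory_order_release);
    }

    int main(void)
    {
            static struct node n;

            publish(&n);
            printf("published %p\n",
                   (void *)atomic_load_explicit(&prev_next, memory_order_acquire));
            return 0;
    }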
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 1cd2d943b9b5..98caf74882ac 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1864,6 +1864,15 @@ static struct ctl_table fs_table[] = {
.proc_handler = proc_dointvec_minmax,
.extra1 = &one,
},
+ {
+ .procname = "procfs-drop-fd-dentries",
+ .data = &procfs_drop_fd_dentries,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
{ }
};
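
The new entry surfaces as /proc/sys/fs/procfs-drop-fd-dentries, clamped to 0 or 1 by the zero/one bounds. A trivial userspace toggle, assuming a kernel carrying this patch:

    #include <stdio.h>

    /* Equivalent to: echo 1 > /proc/sys/fs/procfs-drop-fd-dentries */
    int main(void)
    {
            FILE *f = fopen("/proc/sys/fs/procfs-drop-fd-dentries", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fputs("1\n", f);
            fclose(f);
            return 0;
    }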
diff --git a/lib/kobject.c b/lib/kobject.c
index 763d70a18941..2d139aadc45f 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -206,8 +206,9 @@ static int kobject_add_internal(struct kobject *kobj)
return -ENOENT;
if (!kobj->name || !kobj->name[0]) {
- WARN(1, "kobject: (%p): attempted to be registered with empty "
- "name!\n", kobj);
+ WARN(1,
+ "kobject: (%p): attempted to be registered with empty name!\n",
+ kobj);
return -EINVAL;
}
@@ -234,14 +235,12 @@ static int kobject_add_internal(struct kobject *kobj)
/* be noisy on error issues */
if (error == -EEXIST)
- WARN(1, "%s failed for %s with "
- "-EEXIST, don't try to register things with "
- "the same name in the same directory.\n",
- __func__, kobject_name(kobj));
+ pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
+ __func__, kobject_name(kobj));
else
- WARN(1, "%s failed for %s (error: %d parent: %s)\n",
- __func__, kobject_name(kobj), error,
- parent ? kobject_name(parent) : "'none'");
+ pr_err("%s failed for %s (error: %d parent: %s)\n",
+ __func__, kobject_name(kobj), error,
+ parent ? kobject_name(parent) : "'none'");
} else
kobj->state_in_sysfs = 1;
@@ -336,8 +335,8 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
}
if (kobj->state_initialized) {
/* do not error out as sometimes we can recover */
- printk(KERN_ERR "kobject (%p): tried to init an initialized "
- "object, something is seriously wrong.\n", kobj);
+ pr_err("kobject (%p): tried to init an initialized object, something is seriously wrong.\n",
+ kobj);
dump_stack();
}
@@ -346,7 +345,7 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
return;
error:
- printk(KERN_ERR "kobject (%p): %s\n", kobj, err_str);
+ pr_err("kobject (%p): %s\n", kobj, err_str);
dump_stack();
}
EXPORT_SYMBOL(kobject_init);
@@ -359,7 +358,7 @@ static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
retval = kobject_set_name_vargs(kobj, fmt, vargs);
if (retval) {
- printk(KERN_ERR "kobject: can not set name properly!\n");
+ pr_err("kobject: can not set name properly!\n");
return retval;
}
kobj->parent = parent;
@@ -401,8 +400,7 @@ int kobject_add(struct kobject *kobj, struct kobject *parent,
return -EINVAL;
if (!kobj->state_initialized) {
- printk(KERN_ERR "kobject '%s' (%p): tried to add an "
- "uninitialized object, something is seriously wrong.\n",
+ pr_err("kobject '%s' (%p): tried to add an uninitialized object, something is seriously wrong.\n",
kobject_name(kobj), kobj);
dump_stack();
return -EINVAL;
@@ -592,9 +590,9 @@ struct kobject *kobject_get(struct kobject *kobj)
{
if (kobj) {
if (!kobj->state_initialized)
- WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
- "initialized, yet kobject_get() is being "
- "called.\n", kobject_name(kobj), kobj);
+ WARN(1, KERN_WARNING
+ "kobject: '%s' (%p): is not initialized, yet kobject_get() is being called.\n",
+ kobject_name(kobj), kobj);
kref_get(&kobj->kref);
}
return kobj;
@@ -624,8 +622,7 @@ static void kobject_cleanup(struct kobject *kobj)
kobject_name(kobj), kobj, __func__, kobj->parent);
if (t && !t->release)
- pr_debug("kobject: '%s' (%p): does not have a release() "
- "function, it is broken and must be fixed.\n",
+ pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed.\n",
kobject_name(kobj), kobj);
/* send "remove" if the caller did not do it but sent "add" */
@@ -688,9 +685,9 @@ void kobject_put(struct kobject *kobj)
{
if (kobj) {
if (!kobj->state_initialized)
- WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
- "initialized, yet kobject_put() is being "
- "called.\n", kobject_name(kobj), kobj);
+ WARN(1, KERN_WARNING
+ "kobject: '%s' (%p): is not initialized, yet kobject_put() is being called.\n",
+ kobject_name(kobj), kobj);
kref_put(&kobj->kref, kobject_release);
}
}
@@ -754,8 +751,7 @@ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
retval = kobject_add(kobj, parent, "%s", name);
if (retval) {
- printk(KERN_WARNING "%s: kobject_add error: %d\n",
- __func__, retval);
+ pr_warn("%s: kobject_add error: %d\n", __func__, retval);
kobject_put(kobj);
kobj = NULL;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 140e9cacefff..4d1e6fecd385 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3155,10 +3155,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
/* + transport layer */
- if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
- hdr_len += tcp_hdrlen(skb);
- else
- hdr_len += sizeof(struct udphdr);
+ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+ const struct tcphdr *th;
+ struct tcphdr _tcphdr;
+
+ th = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_tcphdr), &_tcphdr);
+ if (likely(th))
+ hdr_len += __tcp_hdrlen(th);
+ } else {
+ struct udphdr _udphdr;
+
+ if (skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_udphdr), &_udphdr))
+ hdr_len += sizeof(struct udphdr);
+ }
if (shinfo->gso_type & SKB_GSO_DODGY)
gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
@@ -3233,15 +3244,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
- struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+ const struct netprio_map *map;
+ const struct sock *sk;
+ unsigned int prioidx;
+
+ if (skb->priority)
+ return;
+ map = rcu_dereference_bh(skb->dev->priomap);
+ if (!map)
+ return;
+ sk = skb_to_full_sk(skb);
+ if (!sk)
+ return;
- if (!skb->priority && skb->sk && map) {
- unsigned int prioidx =
- sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
+ prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
- if (prioidx < map->priomap_len)
- skb->priority = map->priomap[prioidx];
- }
+ if (prioidx < map->priomap_len)
+ skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
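
The qdisc_pkt_len_init() hunk stops trusting that an untrusted GSO packet actually contains the transport header it claims: skb_header_pointer() validates and, if needed, copies the header before it is dereferenced. A userspace analogue of that bounds-validated read (the kernel helper additionally handles non-linear skbs):

    #include <stdio.h>
    #include <string.h>

    /* Analogue of skb_header_pointer(): check that len bytes at offset
     * exist before reading them, copying into caller storage. */
    static const void *header_pointer(const void *buf, size_t buflen,
                                      size_t offset, size_t len, void *copy)
    {
            if (offset > buflen || len > buflen - offset)
                    return NULL;
            memcpy(copy, (const char *)buf + offset, len);
            return copy;
    }

    int main(void)
    {
            unsigned char pkt[54] = { 0 };   /* shorter than the claimed header */
            unsigned char hdr[20];

            /* Bail out instead of reading past the end of the packet. */
            printf("hdr: %p\n", header_pointer(pkt, sizeof(pkt), 40, 20, hdr));
            return 0;
    }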
diff --git a/net/core/devlink.c b/net/core/devlink.c
index a0adfc31a3fe..ec39f49b042d 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1723,7 +1723,7 @@ send_done:
if (!nlh) {
err = devlink_dpipe_send_and_alloc_skb(&skb, info);
if (err)
- goto err_skb_send_alloc;
+ return err;
goto send_done;
}
@@ -1732,7 +1732,6 @@ send_done:
nla_put_failure:
err = -EMSGSIZE;
err_table_put:
-err_skb_send_alloc:
genlmsg_cancel(skb, hdr);
nlmsg_free(skb);
return err;
@@ -1976,7 +1975,7 @@ static int devlink_dpipe_entries_fill(struct genl_info *info,
table->counters_enabled,
&dump_ctx);
if (err)
- goto err_entries_dump;
+ return err;
send_done:
nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
@@ -1984,16 +1983,10 @@ send_done:
if (!nlh) {
err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
if (err)
- goto err_skb_send_alloc;
+ return err;
goto send_done;
}
return genlmsg_reply(dump_ctx.skb, info);
-
-err_entries_dump:
-err_skb_send_alloc:
- genlmsg_cancel(dump_ctx.skb, dump_ctx.hdr);
- nlmsg_free(dump_ctx.skb);
- return err;
}
static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
@@ -2132,7 +2125,7 @@ send_done:
if (!nlh) {
err = devlink_dpipe_send_and_alloc_skb(&skb, info);
if (err)
- goto err_skb_send_alloc;
+ return err;
goto send_done;
}
return genlmsg_reply(skb, info);
@@ -2140,7 +2133,6 @@ send_done:
nla_put_failure:
err = -EMSGSIZE;
err_table_put:
-err_skb_send_alloc:
genlmsg_cancel(skb, hdr);
nlmsg_free(skb);
return err;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index abcc1f243f8c..73da82ae6223 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -776,8 +776,8 @@ ip_proto_again:
out_good:
ret = true;
- key_control->thoff = (u16)nhoff;
out:
+ key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
key_basic->n_proto = proto;
key_basic->ip_proto = ip_proto;
@@ -785,7 +785,6 @@ out:
out_bad:
ret = false;
- key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
goto out;
}
EXPORT_SYMBOL(__skb_flow_dissect);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index dadb5eef91c3..b6acd49a8d58 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -533,7 +533,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
- hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+ hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
if (n->parms->dead) {
rc = ERR_PTR(-EINVAL);
@@ -545,7 +545,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
n1 != NULL;
n1 = rcu_dereference_protected(n1->next,
lockdep_is_held(&tbl->lock))) {
- if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
+ if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
if (want_ref)
neigh_hold(n1);
rc = n1;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3ce0b435e1d0..6bcb101fd501 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3754,7 +3754,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
skb_queue_tail(&sk->sk_error_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk);
+ sk->sk_error_report(sk);
return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index b1e0dbea1e8c..bae73033f8c9 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -93,6 +93,16 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
return more_reuse;
}
+static void reuseport_free_rcu(struct rcu_head *head)
+{
+ struct sock_reuseport *reuse;
+
+ reuse = container_of(head, struct sock_reuseport, rcu);
+ if (reuse->prog)
+ bpf_prog_destroy(reuse->prog);
+ kfree(reuse);
+}
+
/**
* reuseport_add_sock - Add a socket to the reuseport group of another.
* @sk: New socket to add to the group.
@@ -101,7 +111,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
*/
int reuseport_add_sock(struct sock *sk, struct sock *sk2)
{
- struct sock_reuseport *reuse;
+ struct sock_reuseport *old_reuse, *reuse;
if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
int err = reuseport_alloc(sk2);
@@ -112,10 +122,13 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)
spin_lock_bh(&reuseport_lock);
reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
- lockdep_is_held(&reuseport_lock)),
- WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
- lockdep_is_held(&reuseport_lock)),
- "socket already in reuseport group");
+ lockdep_is_held(&reuseport_lock));
+ old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
+ lockdep_is_held(&reuseport_lock));
+ if (old_reuse && old_reuse->num_socks != 1) {
+ spin_unlock_bh(&reuseport_lock);
+ return -EBUSY;
+ }
if (reuse->num_socks == reuse->max_socks) {
reuse = reuseport_grow(reuse);
@@ -133,19 +146,11 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)
spin_unlock_bh(&reuseport_lock);
+ if (old_reuse)
+ call_rcu(&old_reuse->rcu, reuseport_free_rcu);
return 0;
}
-static void reuseport_free_rcu(struct rcu_head *head)
-{
- struct sock_reuseport *reuse;
-
- reuse = container_of(head, struct sock_reuseport, rcu);
- if (reuse->prog)
- bpf_prog_destroy(reuse->prog);
- kfree(reuse);
-}
-
void reuseport_detach_sock(struct sock *sk)
{
struct sock_reuseport *reuse;
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index e1295d5f2c56..97791b0b1b51 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
ccid2_pr_debug("RTO_EXPIRE\n");
+ if (sk->sk_state == DCCP_CLOSED)
+ goto out;
+
/* back-off timer */
hc->tx_rto <<= 1;
if (hc->tx_rto > DCCP_RTO_MAX)
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 1a4c585f3950..c7ef9418e7fd 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -205,9 +205,13 @@ static inline void lowpan_netlink_fini(void)
static int lowpan_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
- struct net_device *wdev = netdev_notifier_info_to_dev(ptr);
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct wpan_dev *wpan_dev;
- if (wdev->type != ARPHRD_IEEE802154)
+ if (ndev->type != ARPHRD_IEEE802154)
+ return NOTIFY_DONE;
+ wpan_dev = ndev->ieee802154_ptr;
+ if (!wpan_dev)
return NOTIFY_DONE;
switch (event) {
@@ -216,8 +220,8 @@ static int lowpan_device_event(struct notifier_block *unused,
* also delete possible lowpan interfaces which belongs
* to the wpan interface.
*/
- if (wdev->ieee802154_ptr->lowpan_dev)
- lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL);
+ if (wpan_dev->lowpan_dev)
+ lowpan_dellink(wpan_dev->lowpan_dev, NULL);
break;
default:
return NOTIFY_DONE;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 8b52179ddc6e..cf7e25ddb96f 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
static int arp_constructor(struct neighbour *neigh)
{
- __be32 addr = *(__be32 *)neigh->primary_key;
+ __be32 addr;
struct net_device *dev = neigh->dev;
struct in_device *in_dev;
struct neigh_parms *parms;
+ u32 inaddr_any = INADDR_ANY;
+ if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
+ memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);
+
+ addr = *(__be32 *)neigh->primary_key;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (!in_dev) {
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 12850ed4cd91..bccdb1f7b18b 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -632,6 +632,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
cfg->fc_encap, fi->fib_nh, cfg))
return 1;
}
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ if (cfg->fc_flow &&
+ cfg->fc_flow != fi->fib_nh->nh_tclassid)
+ return 1;
+#endif
if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
(!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
return 0;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index a4be114b5fac..4a3572d9a9c3 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -119,6 +119,9 @@ out:
static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
+ if (!hlist_unhashed(&q->list_evictor))
+ return false;
+
return q->net->low_thresh == 0 ||
frag_mem_limit(q->net) >= q->net->low_thresh;
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 1b3fa8f6dfc3..116f336dfcbb 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -257,7 +257,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
return -EINVAL;
- ipc->oif = src_info->ipi6_ifindex;
+ if (src_info->ipi6_ifindex)
+ ipc->oif = src_info->ipi6_ifindex;
ipc->addr = src_info->ipi6_addr.s6_addr32[3];
continue;
}
@@ -287,7 +288,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
return -EINVAL;
info = (struct in_pktinfo *)CMSG_DATA(cmsg);
- ipc->oif = info->ipi_ifindex;
+ if (info->ipi_ifindex)
+ ipc->oif = info->ipi_ifindex;
ipc->addr = info->ipi_spec_dst.s_addr;
break;
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 198d0e3aacd1..1b4e5ab81653 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -126,10 +126,11 @@ static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly = HZ;
static int ip_rt_error_burst __read_mostly = 5 * HZ;
static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
-static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
+static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
+
/*
* Interface to generic destination cache.
*/
@@ -2775,6 +2776,7 @@ void ip_rt_multicast_event(struct in_device *in_dev)
static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_gc_elasticity __read_mostly = 8;
+static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
void __user *buffer,
@@ -2890,7 +2892,8 @@ static struct ctl_table ipv4_route_table[] = {
.data = &ip_rt_min_pmtu,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &ip_min_valid_pmtu,
},
{
.procname = "min_adv_mss",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b13a75974fdc..75a4c2ccd713 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2133,6 +2133,9 @@ adjudge_to_death:
tcp_send_active_reset(sk, GFP_ATOMIC);
__NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPABORTONMEMORY);
+ } else if (!check_net(sock_net(sk))) {
+ /* Not possible to send reset; just close */
+ tcp_set_state(sk, TCP_CLOSE);
}
}
@@ -2236,6 +2239,12 @@ int tcp_disconnect(struct sock *sk, int flags)
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
+ if (sk->sk_frag.page) {
+ put_page(sk->sk_frag.page);
+ sk->sk_frag.page = NULL;
+ sk->sk_frag.offset = 0;
+ }
+
sk->sk_error_report(sk);
return err;
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bb56d6da7078..1d598b0390fc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1946,6 +1946,7 @@ void tcp_enter_loss(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
struct sk_buff *skb;
+ bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
bool is_reneg; /* is receiver reneging on SACKs? */
bool mark_lost;
@@ -2007,15 +2008,12 @@ void tcp_enter_loss(struct sock *sk)
tp->high_seq = tp->snd_nxt;
tcp_ecn_queue_cwr(tp);
- /* F-RTO RFC5682 sec 3.1 step 1 mandates to disable F-RTO
- * if a previous recovery is underway, otherwise it may incorrectly
- * call a timeout spurious if some previously retransmitted packets
- * are s/acked (sec 3.2). We do not apply that retriction since
- * retransmitted skbs are permanently tagged with TCPCB_EVER_RETRANS
- * so FLAG_ORIG_SACK_ACKED is always correct. But we do disable F-RTO
- * on PTMU discovery to avoid sending new data.
+ /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
+ * loss recovery is underway except recurring timeout(s) on
+ * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
*/
tp->frto = sysctl_tcp_frto &&
+ (new_recovery || icsk->icsk_retransmits) &&
!inet_csk(sk)->icsk_mtup.probe_size;
}
@@ -2694,18 +2692,14 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
tcp_try_undo_loss(sk, false))
return;
- /* The ACK (s)acks some never-retransmitted data meaning not all
- * the data packets before the timeout were lost. Therefore we
- * undo the congestion window and state. This is essentially
- * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since
- * a retransmitted skb is permantly marked, we can apply such an
- * operation even if F-RTO was not used.
- */
- if ((flag & FLAG_ORIG_SACK_ACKED) &&
- tcp_try_undo_loss(sk, tp->undo_marker))
- return;
-
if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
+ /* Step 3.b. A timeout is spurious if not all data are
+ * lost, i.e., never-retransmitted data are (s)acked.
+ */
+ if ((flag & FLAG_ORIG_SACK_ACKED) &&
+ tcp_try_undo_loss(sk, true))
+ return;
+
if (after(tp->snd_nxt, tp->high_seq)) {
if (flag & FLAG_DATA_SACKED || is_dupack)
tp->frto = 0; /* Step 3.a. loss was real */
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index e9af1879cd53..14ac7df95380 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -50,11 +50,19 @@ static void tcp_write_err(struct sock *sk)
* to prevent DoS attacks. It is called when a retransmission timeout
* or zero probe timeout occurs on orphaned socket.
*
+ * Also close if our net namespace is exiting; in that case there is no
+ * hope of ever communicating again since all netns interfaces are already
+ * down (or about to be down), and we need to release our dst references,
+ * which have been moved to the netns loopback interface, so the namespace
+ * can finish exiting. This condition is only possible if we are a kernel
+ * socket, as those do not hold references to the namespace.
+ *
* Criteria is still not confirmed experimentally and may change.
* We kill the socket, if:
* 1. If number of orphaned sockets exceeds an administratively configured
* limit.
* 2. If we have strong memory pressure.
+ * 3. If our net namespace is exiting.
*/
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
@@ -83,6 +91,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
return 1;
}
+
+ if (!check_net(sock_net(sk))) {
+ /* Not possible to send reset; just close */
+ tcp_done(sk);
+ return 1;
+ }
+
return 0;
}
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 5c786f5ab961..0c73e8c10121 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -146,10 +146,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
- struct in6_addr *daddr;
+ struct in6_addr *daddr, old_daddr;
+ __be32 fl6_flowlabel = 0;
+ __be32 old_fl6_flowlabel;
+ __be16 old_dport;
int addr_type;
int err;
- __be32 fl6_flowlabel = 0;
if (usin->sin6_family == AF_INET) {
if (__ipv6_only_sock(sk))
@@ -239,9 +241,13 @@ ipv4_connected:
}
}
+ /* save the current peer information before updating it */
+ old_daddr = sk->sk_v6_daddr;
+ old_fl6_flowlabel = np->flow_label;
+ old_dport = inet->inet_dport;
+
sk->sk_v6_daddr = *daddr;
np->flow_label = fl6_flowlabel;
-
inet->inet_dport = usin->sin6_port;
/*
@@ -251,11 +257,12 @@ ipv4_connected:
err = ip6_datagram_dst_update(sk, true);
if (err) {
- /* Reset daddr and dport so that udp_v6_early_demux()
- * fails to find this socket
+ /* Restore the socket peer info, to keep it consistent with
+ * the old socket state
*/
- memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
- inet->inet_dport = 0;
+ sk->sk_v6_daddr = old_daddr;
+ np->flow_label = old_fl6_flowlabel;
+ inet->inet_dport = old_dport;
goto out;
}
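
The datagram.c change turns the failure path from "zero the peer info" into "restore the snapshot taken before the update", so the socket never keeps half-new state. A compact sketch of that snapshot-and-rollback shape (types and functions hypothetical):

    #include <stdio.h>

    struct sock_state { int daddr, dport; };

    static int dst_update_fails(void) { return -1; }

    /* Snapshot the fields first and roll back on failure so the socket
     * always holds a consistent peer tuple. */
    static int do_connect(struct sock_state *sk, int daddr, int dport)
    {
            struct sock_state old = *sk;
            int err;

            sk->daddr = daddr;
            sk->dport = dport;
            err = dst_update_fails();
            if (err)
                    *sk = old;   /* restore the old, consistent state */
            return err;
    }

    int main(void)
    {
            struct sock_state sk = { 1, 80 };

            do_connect(&sk, 2, 443);
            printf("daddr=%d dport=%d\n", sk.daddr, sk.dport);
            return 0;
    }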
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 6099fc9f3f48..22886afa6bc9 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -337,11 +337,12 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
nt->dev = dev;
nt->net = dev_net(dev);
- ip6gre_tnl_link_config(nt, 1);
if (register_netdevice(dev) < 0)
goto failed_free;
+ ip6gre_tnl_link_config(nt, 1);
+
/* Can use a lockless transmit, unless we generate output sequences */
if (!(nt->parms.o_flags & TUNNEL_SEQ))
dev->features |= NETIF_F_LLTX;
@@ -1305,7 +1306,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
static int ip6gre_tap_init(struct net_device *dev)
{
- struct ip6_tnl *tunnel;
int ret;
ret = ip6gre_tunnel_init_common(dev);
@@ -1314,10 +1314,6 @@ static int ip6gre_tap_init(struct net_device *dev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- tunnel = netdev_priv(dev);
-
- ip6gre_tnl_link_config(tunnel, 1);
-
return 0;
}
@@ -1410,12 +1406,16 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
nt->dev = dev;
nt->net = dev_net(dev);
- ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
err = register_netdevice(dev);
if (err)
goto out;
+ ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
+
+ if (tb[IFLA_MTU])
+ ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+
dev_hold(dev);
ip6gre_tunnel_link(ign, nt);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 083b8622551f..2ebf11514dd0 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1207,14 +1207,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
v6_cork->tclass = ipc6->tclass;
if (rt->dst.flags & DST_XFRM_TUNNEL)
mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
- rt->dst.dev->mtu : dst_mtu(&rt->dst);
+ READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
else
mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
- rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+ READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
if (np->frag_size < mtu) {
if (np->frag_size)
mtu = np->frag_size;
}
+ if (mtu < IPV6_MIN_MTU)
+ return -EINVAL;
cork->base.fragsize = mtu;
if (dst_allfrag(rt->dst.path))
cork->base.flags |= IPCORK_ALLFRAG;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index b0e2bf1f4212..fb0b3f35f358 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -495,6 +495,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
return ERR_PTR(-ENOENT);
it->mrt = mrt;
+ it->cache = NULL;
return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
: SEQ_START_TOKEN;
}
@@ -1792,7 +1793,8 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
ret = 0;
if (!ip6mr_new_table(net, v))
ret = -ENOMEM;
- raw6_sk(sk)->ip6mr_table = v;
+ else
+ raw6_sk(sk)->ip6mr_table = v;
rtnl_unlock();
return ret;
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index e1bd2841aef1..3de90bdc48d4 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1546,7 +1546,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
*(opt++) = (rd_len >> 3);
opt += 6;
- memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8);
+ skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt,
+ rd_len - 8);
}
void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 6a495490d43e..fd7104323e53 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -93,7 +93,8 @@ static void set_tun_src(struct net *net, struct net_device *dev,
/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
static int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
{
- struct net *net = dev_net(skb_dst(skb)->dev);
+ struct dst_entry *dst = skb_dst(skb);
+ struct net *net = dev_net(dst->dev);
struct ipv6hdr *hdr, *inner_hdr;
struct ipv6_sr_hdr *isrh;
int hdrlen, tot_len, err;
@@ -127,7 +128,7 @@ static int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
isrh->nexthdr = NEXTHDR_IPV6;
hdr->daddr = isrh->segments[isrh->first_segment];
- set_tun_src(net, skb->dev, &hdr->daddr, &hdr->saddr);
+ set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
#ifdef CONFIG_IPV6_SEG6_HMAC
if (sr_has_hmac(isrh)) {
@@ -376,7 +377,7 @@ static int seg6_build_state(struct nlattr *nla,
slwt = seg6_lwt_lwtunnel(newts);
- err = dst_cache_init(&slwt->cache, GFP_KERNEL);
+ err = dst_cache_init(&slwt->cache, GFP_ATOMIC);
if (err) {
kfree(newts);
return err;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 647bb7551659..6b3837357513 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -176,7 +176,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel *t = netdev_priv(dev);
- if (t->dev == sitn->fb_tunnel_dev) {
+ if (dev == sitn->fb_tunnel_dev) {
ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
t->ip6rd.relay_prefix = 0;
t->ip6rd.prefixlen = 16;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 2cf9d59f1b72..293eba071cec 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -2435,9 +2435,11 @@ static int afiucv_iucv_init(void)
af_iucv_dev->driver = &af_iucv_driver;
err = device_register(af_iucv_dev);
if (err)
- goto out_driver;
+ goto out_iucv_dev;
return 0;
+out_iucv_dev:
+ put_device(af_iucv_dev);
out_driver:
driver_unregister(&af_iucv_driver);
out_iucv:
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 0ddcb209bea6..fefccc4d29b8 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1377,19 +1377,25 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
struct list_head *head;
int index = 0;
struct strp_callbacks cb;
- int err;
+ int err = 0;
csk = csock->sk;
if (!csk)
return -EINVAL;
+ lock_sock(csk);
+
/* We must prevent loops or risk deadlock ! */
- if (csk->sk_family == PF_KCM)
- return -EOPNOTSUPP;
+ if (csk->sk_family == PF_KCM) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
- if (!psock)
- return -ENOMEM;
+ if (!psock) {
+ err = -ENOMEM;
+ goto out;
+ }
psock->mux = mux;
psock->sk = csk;
@@ -1403,7 +1409,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
err = strp_init(&psock->strp, csk, &cb);
if (err) {
kmem_cache_free(kcm_psockp, psock);
- return err;
+ goto out;
}
sock_hold(csk);
@@ -1439,7 +1445,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
/* Schedule RX work in case there are already bytes queued */
strp_check_rcv(&psock->strp);
- return 0;
+out:
+ release_sock(csk);
+
+ return err;
}
static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
@@ -1491,6 +1500,7 @@ static void kcm_unattach(struct kcm_psock *psock)
if (WARN_ON(psock->rx_kcm)) {
write_unlock_bh(&csk->sk_callback_lock);
+ release_sock(csk);
return;
}
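The kcm_attach() rework takes lock_sock(csk) up front and turns every early return into a goto out, so the lock is released on all paths; the matching release_sock() in kcm_unattach() covers the rx_kcm warning path. A small runnable model of the single-exit locking pattern, with a pthread mutex standing in for the socket lock (build with -lpthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int attach(int is_self_family, int have_mem)
{
    int err = 0;

    pthread_mutex_lock(&lock);

    if (is_self_family) {        /* the PF_KCM loop check */
        err = -EOPNOTSUPP;
        goto out;
    }
    if (!have_mem) {             /* the psock allocation check */
        err = -ENOMEM;
        goto out;
    }
    /* ... the real work would happen here, still under the lock ... */
out:
    pthread_mutex_unlock(&lock); /* every path unlocks exactly once */
    return err;
}

int main(void)
{
    printf("%d %d\n", attach(1, 1), attach(0, 1)); /* -95 0 on Linux */
    return 0;
}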
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 5d52aa270a44..2a32d13c6060 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2387,13 +2387,14 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
struct nlmsghdr *,
struct netlink_ext_ack *))
{
- struct netlink_ext_ack extack = {};
+ struct netlink_ext_ack extack;
struct nlmsghdr *nlh;
int err;
while (skb->len >= nlmsg_total_size(0)) {
int msglen;
+ memset(&extack, 0, sizeof(extack));
nlh = nlmsg_hdr(skb);
err = 0;
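Moving the extack initialization into the loop resets the extended-ack state for every message in the batch; otherwise error attributes from one message could leak into the ack of the next. A tiny runnable model of the reset-per-iteration pattern (the struct and messages are illustrative):

#include <stdio.h>
#include <string.h>

struct ack { const char *msg; };

int main(void)
{
    struct ack extack;
    const char *errs[] = { "bad attribute", NULL, NULL };

    for (int i = 0; i < 3; i++) {
        memset(&extack, 0, sizeof(extack)); /* the added per-message reset */
        if (errs[i])
            extack.msg = errs[i];
        printf("msg %d -> %s\n", i, extack.msg ? extack.msg : "(none)");
    }
    return 0;
}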
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 10f8b4cff40a..680aebb87888 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1080,6 +1080,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
{
struct sk_buff *tmp;
struct net *net, *prev = NULL;
+ bool delivered = false;
int err;
for_each_net_rcu(net) {
@@ -1091,14 +1092,21 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
}
err = nlmsg_multicast(prev->genl_sock, tmp,
portid, group, flags);
- if (err)
+ if (!err)
+ delivered = true;
+ else if (err != -ESRCH)
goto error;
}
prev = net;
}
- return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+ err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+ if (!err)
+ delivered = true;
+ else if (err != -ESRCH)
+ return err;
+ return delivered ? 0 : -ESRCH;
error:
kfree_skb(skb);
return err;
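-ESRCH from nlmsg_multicast() only means a namespace had no listeners, so the rewritten loop records whether anyone got the message and fails with -ESRCH only when nobody did, while any other error still aborts. A runnable model of that accounting, with simulated per-namespace send results:

#include <errno.h>
#include <stdio.h>

static int mcast_all(const int *send_results, int n)
{
    int delivered = 0;

    for (int i = 0; i < n; i++) {
        int err = send_results[i];

        if (!err)
            delivered = 1;
        else if (err != -ESRCH)
            return err;           /* a real failure aborts */
    }
    return delivered ? 0 : -ESRCH;
}

int main(void)
{
    int one_listener[] = { -ESRCH, 0, -ESRCH };
    int none[] = { -ESRCH, -ESRCH };

    printf("%d %d\n", mcast_all(one_listener, 3), mcast_all(none, 2));
    return 0;  /* prints "0 -3" on Linux: one delivery is enough */
}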
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 5dab1ff3a6c2..59d328603312 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -391,7 +391,7 @@ send_fragmentable:
(char *)&opt, sizeof(opt));
if (ret == 0) {
ret = kernel_sendmsg(conn->params.local->socket, &msg,
- iov, 1, iov[0].iov_len);
+ iov, 2, len);
opt = IPV6_PMTUDISC_DO;
kernel_setsockopt(conn->params.local->socket,
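The fragmentable path builds a two-element iovec (header plus payload) but previously submitted only the first element with its own length, so only the header was ever sent. A toy model of the corrected call shape; fake_sendmsg() is a stand-in for kernel_sendmsg(), not its real signature:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t fake_sendmsg(const struct iovec *iov, int n, size_t len)
{
    size_t avail = 0;

    for (int i = 0; i < n; i++)
        avail += iov[i].iov_len;
    return avail < len ? (ssize_t)avail : (ssize_t)len;
}

int main(void)
{
    char hdr[8] = "HDR", payload[32] = "DATA";
    struct iovec iov[2] = {
        { .iov_base = hdr,     .iov_len = sizeof(hdr) },
        { .iov_base = payload, .iov_len = sizeof(payload) },
    };
    size_t len = iov[0].iov_len + iov[1].iov_len;

    /* the buggy shape was (iov, 1, iov[0].iov_len): header only */
    printf("sent %zd of %zu\n", fake_sendmsg(iov, 2, len), len);
    return 0;
}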
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 1b77f5a69c23..6f944c8c5aa2 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -830,8 +830,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
if (tca[TCA_CHAIN] &&
nla_get_u32(tca[TCA_CHAIN]) != chain->index)
continue;
- if (!tcf_chain_dump(chain, skb, cb, index_start, &index))
+ if (!tcf_chain_dump(chain, skb, cb, index_start, &index)) {
+ err = -EMSGSIZE;
break;
+ }
}
cb->args[0] = index;
@@ -840,6 +842,9 @@ errout:
if (cl)
cops->put(q, cl);
out:
+	/* If we made no progress, the error (EMSGSIZE) is real */
+ if (skb->len == 0 && err)
+ return err;
return skb->len;
}
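Netlink dumps signal "resume here" by returning skb->len, so an error can only be surfaced when nothing fit at all; that lets userspace distinguish a resumable partial dump from a message that can never fit. A minimal model of the return convention (values illustrative):

#include <errno.h>
#include <stdio.h>

static int dump(int bytes_fitted, int err)
{
    /* if we made no progress, the error (EMSGSIZE) is real */
    if (bytes_fitted == 0 && err)
        return err;
    return bytes_fitted;  /* partial dump: caller comes back for more */
}

int main(void)
{
    printf("%d %d\n", dump(0, -EMSGSIZE), dump(512, -EMSGSIZE));
    return 0;  /* prints "-90 512" on Linux */
}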
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 34f11a54d369..282812a87fd4 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -875,7 +875,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (TC_U32_KEY(n->handle) == 0)
return -EINVAL;
- if (n->flags != flags)
+ if ((n->flags ^ flags) &
+ ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW))
return -EINVAL;
new = u32_init_knode(tp, n);
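TCA_CLS_FLAGS_IN_HW and TCA_CLS_FLAGS_NOT_IN_HW are status bits the kernel sets by itself, so an exact equality test would spuriously reject a rule replace once offload state changed; the XOR-and-mask compares only the user-controlled bits. A runnable model with illustrative flag values:

#include <stdio.h>

#define FLAG_IN_HW      0x1u  /* kernel-owned status bits (values */
#define FLAG_NOT_IN_HW  0x2u  /* illustrative, not the TCA_* ones) */
#define STATUS_MASK     (FLAG_IN_HW | FLAG_NOT_IN_HW)

static int user_flags_changed(unsigned int old_flags, unsigned int new_flags)
{
    return ((old_flags ^ new_flags) & ~STATUS_MASK) != 0;
}

int main(void)
{
    /* kernel set FLAG_IN_HW after offload: not a user-visible change */
    printf("%d\n", user_flags_changed(0x10 | FLAG_IN_HW, 0x10)); /* 0 */
    printf("%d\n", user_flags_changed(0x10, 0x20));              /* 1 */
    return 0;
}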
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 14d1724e0dc4..4fbb0c82996f 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -513,7 +513,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
if (unlikely(sch->q.qlen >= sch->limit))
- return qdisc_drop(skb, sch, to_free);
+ return qdisc_drop_all(skb, sch, to_free);
qdisc_qstats_backlog_inc(sch, skb);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index c52e6f7f43a1..9f77152cac1f 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -324,8 +324,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
- if (!IS_ERR(bdst) &&
- ipv6_chk_addr(dev_net(bdst->dev),
+ if (IS_ERR(bdst))
+ continue;
+
+ if (ipv6_chk_addr(dev_net(bdst->dev),
&laddr->a.v6.sin6_addr, bdst->dev, 1)) {
if (!IS_ERR_OR_NULL(dst))
dst_release(dst);
@@ -334,8 +336,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
}
bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
- if (matchlen > bmatchlen)
+ if (matchlen > bmatchlen) {
+ dst_release(bdst);
continue;
+ }
if (!IS_ERR_OR_NULL(dst))
dst_release(dst);
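Both added paths drop the candidate route before leaving the iteration: an error skips it entirely, and a worse match releases it before continue; without that, each rejected bdst leaked one reference. A toy model of per-iteration release, with refcounting reduced to a counter:

#include <stdio.h>

static int refs;

static int lookup_candidate(int i)
{
    refs++;       /* like ip6_dst_lookup_flow(): comes with a ref */
    return i;
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        int cand = lookup_candidate(i);

        if (cand != 1) {  /* not the best match */
            refs--;       /* the added dst_release() before continue */
            continue;
        }
        refs--;           /* done with the chosen candidate too */
    }
    printf("leaked refs: %d\n", refs);  /* 0 */
    return 0;
}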
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index 2f4eacdde3f2..8810d7b889ec 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -71,6 +71,7 @@ fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
if (IS_ERR(mw->fmr.fm_mr))
goto out_fmr_err;
+ INIT_LIST_HEAD(&mw->mw_list);
return 0;
out_fmr_err:
@@ -101,10 +102,6 @@ fmr_op_release_mr(struct rpcrdma_mw *r)
LIST_HEAD(unmap_list);
int rc;
- /* Ensure MW is not on any rl_registered list */
- if (!list_empty(&r->mw_list))
- list_del(&r->mw_list);
-
kfree(r->fmr.fm_physaddrs);
kfree(r->mw_sg);
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 77e1e0ba041d..a9f36d73290e 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -109,6 +109,7 @@ frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
if (!r->mw_sg)
goto out_list_err;
+ INIT_LIST_HEAD(&r->mw_list);
sg_init_table(r->mw_sg, depth);
init_completion(&f->fr_linv_done);
return 0;
@@ -132,10 +133,6 @@ frwr_op_release_mr(struct rpcrdma_mw *r)
{
int rc;
- /* Ensure MW is not on any rl_registered list */
- if (!list_empty(&r->mw_list))
- list_del(&r->mw_list);
-
rc = ib_dereg_mr(r->frmr.fr_mr);
if (rc)
pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
@@ -192,11 +189,11 @@ frwr_op_recover_mr(struct rpcrdma_mw *mw)
return;
out_release:
- pr_err("rpcrdma: FRMR reset failed %d, %p release\n", rc, mw);
+ pr_err("rpcrdma: FRMR reset failed %d, %p released\n", rc, mw);
r_xprt->rx_stats.mrs_orphaned++;
spin_lock(&r_xprt->rx_buf.rb_mwlock);
- list_del(&mw->mw_all);
+ list_del_init(&mw->mw_all);
spin_unlock(&r_xprt->rx_buf.rb_mwlock);
frwr_op_release_mr(mw);
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 1f321a47109f..02d629900c8c 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -229,7 +229,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
/* alloc the pagelist for receiving buffer */
ppages[p] = alloc_page(GFP_ATOMIC);
if (!ppages[p])
- return -EAGAIN;
+ return -ENOBUFS;
}
seg[n].mr_page = ppages[p];
seg[n].mr_offset = (void *)(unsigned long) page_base;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 36e9e03d226f..f63c18be97a7 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1143,6 +1143,11 @@ rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf)
list_del(&mw->mw_all);
spin_unlock(&buf->rb_mwlock);
+
+ /* Ensure MW is not on any rl_registered list */
+ if (!list_empty(&mw->mw_list))
+ list_del(&mw->mw_list);
+
ia->ri_ops->ro_release_mr(mw);
count++;
spin_lock(&buf->rb_mwlock);
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index ee3f1ba6be25..f29d5d54cdc6 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -384,7 +384,7 @@ rpcrdma_pop_mw(struct list_head *list)
struct rpcrdma_mw *mw;
mw = list_first_entry(list, struct rpcrdma_mw, mw_list);
- list_del(&mw->mw_list);
+ list_del_init(&mw->mw_list);
return mw;
}
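Across the xprtrdma hunks, mw_list is initialized at creation and every removal becomes list_del_init(), so a node that was popped or orphaned is a valid empty list and later list_empty() checks are safe; plain list_del() leaves poisoned pointers behind. A self-contained model with a minimal doubly linked list, not the kernel's <linux/list.h>:

#include <stdio.h>

struct node { struct node *prev, *next; };

static void init(struct node *n)
{
    n->prev = n->next = n;       /* like INIT_LIST_HEAD() */
}

static void add(struct node *head, struct node *n)
{
    n->next = head->next;
    n->prev = head;
    head->next->prev = n;
    head->next = n;
}

static void del_init(struct node *n)
{
    n->prev->next = n->next;     /* unlink ... */
    n->next->prev = n->prev;
    init(n);                     /* ... and leave a valid empty node */
}

static int empty(const struct node *n)
{
    return n->next == n;
}

int main(void)
{
    struct node head, mw;

    init(&head);
    add(&head, &mw);
    del_init(&mw);               /* like list_del_init() in rpcrdma_pop_mw() */
    printf("%d\n", empty(&mw));  /* 1: safe to test again later */
    return 0;
}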
diff --git a/net/tipc/node.c b/net/tipc/node.c
index aeef8011ac7d..8be847de34f9 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1854,36 +1854,38 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
if (strcmp(name, tipc_bclink_name) == 0) {
err = tipc_nl_add_bc_link(net, &msg);
- if (err) {
- nlmsg_free(msg.skb);
- return err;
- }
+ if (err)
+ goto err_free;
} else {
int bearer_id;
struct tipc_node *node;
struct tipc_link *link;
node = tipc_node_find_by_name(net, name, &bearer_id);
- if (!node)
- return -EINVAL;
+ if (!node) {
+ err = -EINVAL;
+ goto err_free;
+ }
tipc_node_read_lock(node);
link = node->links[bearer_id].link;
if (!link) {
tipc_node_read_unlock(node);
- nlmsg_free(msg.skb);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_free;
}
err = __tipc_nl_add_link(net, &msg, link, 0);
tipc_node_read_unlock(node);
- if (err) {
- nlmsg_free(msg.skb);
- return err;
- }
+ if (err)
+ goto err_free;
}
return genlmsg_reply(msg.skb, info);
+
+err_free:
+ nlmsg_free(msg.skb);
+ return err;
}
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
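The rewritten tipc_nl_node_get_link() funnels every failure after the reply buffer is allocated through one err_free label, replacing three copies of the nlmsg_free()-and-return sequence. A small runnable model of the single unwind label; error codes and the buffer are stand-ins:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int get_link(int found, int filled)
{
    char *msg = malloc(64);  /* the reply buffer */
    int err = 0;

    if (!msg)
        return -ENOMEM;
    if (!found) {
        err = -EINVAL;
        goto err_free;
    }
    if (!filled) {
        err = -EINVAL;
        goto err_free;
    }
    free(msg);  /* stands in for genlmsg_reply() consuming the buffer */
    return 0;

err_free:
    free(msg);  /* the one nlmsg_free() every error path shares */
    return err;
}

int main(void)
{
    printf("%d %d\n", get_link(0, 1), get_link(1, 1)); /* -22 0 */
    return 0;
}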
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 297b079ae4d9..27aac273205b 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -745,7 +745,7 @@ int conf_write(const char *name)
struct menu *menu;
const char *basename;
const char *str;
- char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1];
+ char dirname[PATH_MAX+1], tmpname[PATH_MAX+22], newname[PATH_MAX+8];
char *env;
dirname[0] = 0;
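tmpname and newname receive dirname plus a bounded suffix, so they are sized for the worst case instead of PATH_MAX alone, which also satisfies GCC's format-truncation analysis. A runnable sketch of the sizing arithmetic; the suffix and lengths are illustrative of the idea, not copied from confdata.c:

#include <limits.h>
#include <stdio.h>

#ifndef PATH_MAX
#define PATH_MAX 4096
#endif

int main(void)
{
    char dirname[PATH_MAX + 1] = "/tmp/";
    /* ".tmpconfig." is 11 bytes, an unsigned pid at most 10 digits,
     * plus the NUL: 22 extra bytes beyond the directory prefix */
    char tmpname[PATH_MAX + 22];
    unsigned int pid = 12345;

    snprintf(tmpname, sizeof(tmpname), "%s.tmpconfig.%u", dirname, pid);
    puts(tmpname);
    return 0;
}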
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index cbf4996dd9c1..ed29bad1f03a 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -113,7 +113,7 @@ void expr_free(struct expr *e)
break;
case E_NOT:
expr_free(e->left.expr);
- return;
+ break;
case E_EQUAL:
case E_GEQ:
case E_GTH:
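In the E_NOT case, the early return freed the child expression but skipped the free of the node itself at the bottom of expr_free(), leaking one node per negation; break falls through to it. A runnable model of the break-versus-return distinction in a recursive freer (types illustrative):

#include <stdlib.h>

enum kind { K_NOT, K_LEAF };

struct expr { enum kind k; struct expr *left; };

static void expr_free(struct expr *e)
{
    if (!e)
        return;
    switch (e->k) {
    case K_NOT:
        expr_free(e->left);
        break;          /* was `return`: that leaked e itself */
    case K_LEAF:
        break;
    }
    free(e);            /* every case must reach this */
}

int main(void)
{
    struct expr *leaf = calloc(1, sizeof(*leaf));
    struct expr *neg = calloc(1, sizeof(*neg));

    leaf->k = K_LEAF;
    neg->k = K_NOT;
    neg->left = leaf;
    expr_free(neg);     /* frees both nodes */
    return 0;
}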
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index e9357931b47d..749c2bd5fc51 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -372,6 +372,7 @@ void menu_finalize(struct menu *parent)
menu->parent = parent;
last_menu = menu;
}
+ expr_free(basedep);
if (last_menu) {
parent->list = parent->next;
parent->next = last_menu->next;
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 001305fa080b..c56f7e549980 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -108,7 +108,27 @@ static struct menu *current_menu, *current_entry;
%%
input: nl start | start;
-start: mainmenu_stmt stmt_list | stmt_list;
+start: mainmenu_stmt stmt_list | no_mainmenu_stmt stmt_list;
+
+/* mainmenu entry */
+
+mainmenu_stmt: T_MAINMENU prompt nl
+{
+ menu_add_prompt(P_MENU, $2, NULL);
+};
+
+/* Default main menu, if there's no mainmenu entry */
+
+no_mainmenu_stmt: /* empty */
+{
+ /*
+ * Hack: Keep the main menu title on the heap so we can safely free it
+ * later regardless of whether it comes from the 'prompt' in
+ * mainmenu_stmt or here
+ */
+ menu_add_prompt(P_MENU, strdup("Linux Kernel Configuration"), NULL);
+};
+
stmt_list:
/* empty */
@@ -351,13 +371,6 @@ if_block:
| if_block choice_stmt
;
-/* mainmenu entry */
-
-mainmenu_stmt: T_MAINMENU prompt nl
-{
- menu_add_prompt(P_MENU, $2, NULL);
-};
-
/* menu entry */
menu: T_MENU prompt T_EOL
@@ -502,6 +515,7 @@ word_opt: /* empty */ { $$ = NULL; }
void conf_parse(const char *name)
{
+ const char *tmp;
struct symbol *sym;
int i;
@@ -509,7 +523,6 @@ void conf_parse(const char *name)
sym_init();
_menu_init();
- rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
if (getenv("ZCONF_DEBUG"))
zconfdebug = 1;
@@ -519,8 +532,10 @@ void conf_parse(const char *name)
if (!modules_sym)
modules_sym = sym_find( "n" );
+ tmp = rootmenu.prompt->text;
rootmenu.prompt->text = _(rootmenu.prompt->text);
rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text);
+ free((char*)tmp);
menu_finalize(&rootmenu);
for_all_symbols(i, sym) {
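With the default title moved into the grammar and strdup()ed, rootmenu.prompt->text always owns heap memory, so conf_parse() can free the original string unconditionally after expanding it, whichever path produced it. A runnable model of the uniform-ownership pattern; expand() stands in for sym_expand_string_value():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *expand(const char *s)
{
    return strdup(s);  /* stand-in: always returns a fresh heap copy */
}

int main(void)
{
    char *title = strdup("Linux Kernel Configuration"); /* always heap */
    const char *tmp = title;

    title = expand(title);  /* replace, as conf_parse() does */
    free((char *)tmp);      /* safe: the original was heap-allocated */
    printf("%s\n", title);
    free(title);
    return 0;
}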
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 539be16782f3..5d1826a27969 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -27,6 +27,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/firmware.h>
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/pci.h>
@@ -990,6 +991,7 @@ struct ca0132_spec {
enum {
QUIRK_NONE,
QUIRK_ALIENWARE,
+ QUIRK_ALIENWARE_M17XR4,
QUIRK_SBZ,
QUIRK_R3DI,
};
@@ -1039,6 +1041,7 @@ static const struct hda_pintbl r3di_pincfgs[] = {
};
static const struct snd_pci_quirk ca0132_quirks[] = {
+ SND_PCI_QUIRK(0x1028, 0x057b, "Alienware M17x R4", QUIRK_ALIENWARE_M17XR4),
SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
@@ -5115,8 +5118,7 @@ static int ca0132_alt_eq_preset_put(struct snd_kcontrol *kcontrol,
static int ca0132_voicefx_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- unsigned int items = sizeof(ca0132_voicefx_presets)
- / sizeof(struct ct_voicefx_preset);
+ unsigned int items = ARRAY_SIZE(ca0132_voicefx_presets);
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
@@ -5145,10 +5147,8 @@ static int ca0132_voicefx_put(struct snd_kcontrol *kcontrol,
struct ca0132_spec *spec = codec->spec;
int i, err = 0;
int sel = ucontrol->value.enumerated.item[0];
- unsigned int items = sizeof(ca0132_voicefx_presets)
- / sizeof(struct ct_voicefx_preset);
- if (sel >= items)
+ if (sel >= ARRAY_SIZE(ca0132_voicefx_presets))
return 0;
codec_dbg(codec, "ca0132_voicefx_put: sel=%d, preset=%s\n",
@@ -5665,7 +5665,7 @@ static const char * const ca0132_alt_slave_pfxs[] = {
* I think this has to do with the pin for rear surround being 0x11,
* and the center/lfe being 0x10. Usually the pin order is the opposite.
*/
-const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = {
+static const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = {
{ .channels = 2,
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
{ .channels = 4,
@@ -5968,7 +5968,7 @@ static int ca0132_build_pcms(struct hda_codec *codec)
info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0];
/* With the DSP enabled, desktops don't use this ADC. */
- if (spec->use_alt_functions) {
+ if (!spec->use_alt_functions) {
info = snd_hda_codec_pcm_new(codec, "CA0132 Analog Mic-In2");
if (!info)
return -ENOMEM;
@@ -6132,7 +6132,10 @@ static void ca0132_init_dmic(struct hda_codec *codec)
* Bit 6: set to select Data2, clear for Data1
* Bit 7: set to enable DMic, clear for AMic
*/
- val = 0x23;
+ if (spec->quirk == QUIRK_ALIENWARE_M17XR4)
+ val = 0x33;
+ else
+ val = 0x23;
	/* keep a copy of dmic ctl val for enable/disable dmic purpose */
spec->dmic_ctl = val;
snd_hda_codec_write(codec, spec->input_pins[0], 0,
@@ -7225,7 +7228,7 @@ static int ca0132_init(struct hda_codec *codec)
snd_hda_sequence_write(codec, spec->base_init_verbs);
- if (spec->quirk != QUIRK_NONE)
+ if (spec->use_alt_functions)
ca0132_alt_init(codec);
ca0132_download_dsp(codec);
@@ -7239,8 +7242,9 @@ static int ca0132_init(struct hda_codec *codec)
case QUIRK_R3DI:
r3di_setup_defaults(codec);
break;
- case QUIRK_NONE:
- case QUIRK_ALIENWARE:
+ case QUIRK_SBZ:
+ break;
+ default:
ca0132_setup_defaults(codec);
ca0132_init_analog_mic2(codec);
ca0132_init_dmic(codec);
@@ -7345,7 +7349,6 @@ static const struct hda_codec_ops ca0132_patch_ops = {
static void ca0132_config(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
- struct auto_pin_cfg *cfg = &spec->autocfg;
spec->dacs[0] = 0x2;
spec->dacs[1] = 0x3;
@@ -7407,12 +7410,7 @@ static void ca0132_config(struct hda_codec *codec)
/* SPDIF I/O */
spec->dig_out = 0x05;
spec->multiout.dig_out_nid = spec->dig_out;
- cfg->dig_out_pins[0] = 0x0c;
- cfg->dig_outs = 1;
- cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
spec->dig_in = 0x09;
- cfg->dig_in_pin = 0x0e;
- cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
break;
case QUIRK_R3DI:
codec_dbg(codec, "%s: QUIRK_R3DI applied.\n", __func__);
@@ -7440,9 +7438,6 @@ static void ca0132_config(struct hda_codec *codec)
/* SPDIF I/O */
spec->dig_out = 0x05;
spec->multiout.dig_out_nid = spec->dig_out;
- cfg->dig_out_pins[0] = 0x0c;
- cfg->dig_outs = 1;
- cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
break;
default:
spec->num_outputs = 2;
@@ -7465,12 +7460,7 @@ static void ca0132_config(struct hda_codec *codec)
/* SPDIF I/O */
spec->dig_out = 0x05;
spec->multiout.dig_out_nid = spec->dig_out;
- cfg->dig_out_pins[0] = 0x0c;
- cfg->dig_outs = 1;
- cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
spec->dig_in = 0x09;
- cfg->dig_in_pin = 0x0e;
- cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
break;
}
}
@@ -7478,7 +7468,7 @@ static void ca0132_config(struct hda_codec *codec)
static int ca0132_prepare_verbs(struct hda_codec *codec)
{
/* Verbs + terminator (an empty element) */
-#define NUM_SPEC_VERBS 4
+#define NUM_SPEC_VERBS 2
struct ca0132_spec *spec = codec->spec;
spec->chip_init_verbs = ca0132_init_verbs0;
@@ -7488,34 +7478,24 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
if (!spec->spec_init_verbs)
return -ENOMEM;
- /* HP jack autodetection */
- spec->spec_init_verbs[0].nid = spec->unsol_tag_hp;
- spec->spec_init_verbs[0].param = AC_VERB_SET_UNSOLICITED_ENABLE;
- spec->spec_init_verbs[0].verb = AC_USRSP_EN | spec->unsol_tag_hp;
-
- /* MIC1 jack autodetection */
- spec->spec_init_verbs[1].nid = spec->unsol_tag_amic1;
- spec->spec_init_verbs[1].param = AC_VERB_SET_UNSOLICITED_ENABLE;
- spec->spec_init_verbs[1].verb = AC_USRSP_EN | spec->unsol_tag_amic1;
-
/* config EAPD */
- spec->spec_init_verbs[2].nid = 0x0b;
- spec->spec_init_verbs[2].param = 0x78D;
- spec->spec_init_verbs[2].verb = 0x00;
+ spec->spec_init_verbs[0].nid = 0x0b;
+ spec->spec_init_verbs[0].param = 0x78D;
+ spec->spec_init_verbs[0].verb = 0x00;
/* Previously commented configuration */
/*
- spec->spec_init_verbs[3].nid = 0x0b;
- spec->spec_init_verbs[3].param = AC_VERB_SET_EAPD_BTLENABLE;
+ spec->spec_init_verbs[2].nid = 0x0b;
+ spec->spec_init_verbs[2].param = AC_VERB_SET_EAPD_BTLENABLE;
+ spec->spec_init_verbs[2].verb = 0x02;
+
+ spec->spec_init_verbs[3].nid = 0x10;
+ spec->spec_init_verbs[3].param = 0x78D;
spec->spec_init_verbs[3].verb = 0x02;
spec->spec_init_verbs[4].nid = 0x10;
- spec->spec_init_verbs[4].param = 0x78D;
+ spec->spec_init_verbs[4].param = AC_VERB_SET_EAPD_BTLENABLE;
spec->spec_init_verbs[4].verb = 0x02;
-
- spec->spec_init_verbs[5].nid = 0x10;
- spec->spec_init_verbs[5].param = AC_VERB_SET_EAPD_BTLENABLE;
- spec->spec_init_verbs[5].verb = 0x02;
*/
/* Terminator: spec->spec_init_verbs[NUM_SPEC_VERBS-1] */
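Earlier in this file's diff, the open-coded sizeof divisions for the voicefx presets become ARRAY_SIZE(), which derives the element count from the array itself and cannot drift out of sync with the element type. A runnable model of the macro and the guarded lookup it enables (the preset table is illustrative):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct preset { const char *name; };

static const struct preset presets[] = {
    { "neutral" }, { "concert" }, { "theater" },
};

static const char *lookup(unsigned int sel)
{
    if (sel >= ARRAY_SIZE(presets))  /* the bounds check from the hunk */
        return "(out of range)";
    return presets[sel].name;
}

int main(void)
{
    printf("%s %s\n", lookup(1), lookup(5));
    return 0;
}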
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9322b6a1019c..201415b30594 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2545,6 +2545,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110),
SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN),
+ SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270),
SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270),
SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000),
SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ),
@@ -4995,7 +4996,6 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
struct alc_spec *spec = codec->spec;
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->shutup = alc_no_shutup; /* reduce click noise */
spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
codec->power_save_node = 0; /* avoid click noises */
@@ -5394,6 +5394,13 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
/* for hda_fixup_thinkpad_acpi() */
#include "thinkpad_helper.c"
+static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ alc_fixup_no_shutup(codec, fix, action); /* reduce click noise */
+ hda_fixup_thinkpad_acpi(codec, fix, action);
+}
+
/* for dell wmi mic mute led */
#include "dell_wmi_helper.c"
@@ -5946,7 +5953,7 @@ static const struct hda_fixup alc269_fixups[] = {
},
[ALC269_FIXUP_THINKPAD_ACPI] = {
.type = HDA_FIXUP_FUNC,
- .v.func = hda_fixup_thinkpad_acpi,
+ .v.func = alc_fixup_thinkpad_acpi,
.chained = true,
.chain_id = ALC269_FIXUP_SKU_IGNORE,
},
@@ -6603,8 +6610,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
- SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ SND_PCI_QUIRK(0x17aa, 0x3136, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
@@ -6782,6 +6790,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x14, 0x90170110},
{0x19, 0x02a11030},
{0x21, 0x02211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
+ {0x14, 0x90170110},
+ {0x19, 0x02a11030},
+ {0x1a, 0x02a11040},
+ {0x1b, 0x01014020},
+ {0x21, 0x0221101f}),
SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60140},
{0x14, 0x90170110},