Merge 5.10.27 into android12-5.10

Changes in 5.10.27
	mm/memcg: rename mem_cgroup_split_huge_fixup to split_page_memcg and add nr_pages argument
	mm/memcg: set memcg when splitting page
	mt76: fix tx skb error handling in mt76_dma_tx_queue_skb
	net: stmmac: fix dma physical address of descriptor when display ring
	net: fec: ptp: avoid register access when ipg clock is disabled
	powerpc/4xx: Fix build errors from mfdcr()
	atm: eni: dont release is never initialized
	atm: lanai: dont run lanai_dev_close if not open
	Revert "r8152: adjust the settings about MAC clock speed down for RTL8153"
	ALSA: hda: ignore invalid NHLT table
	ixgbe: Fix memleak in ixgbe_configure_clsu32
	scsi: ufs: ufs-qcom: Disable interrupt in reset path
	blk-cgroup: Fix the recursive blkg rwstat
	net: tehuti: fix error return code in bdx_probe()
	net: intel: iavf: fix error return code of iavf_init_get_resources()
	sun/niu: fix wrong RXMAC_BC_FRM_CNT_COUNT count
	gianfar: fix jumbo packets+napi+rx overrun crash
	cifs: ask for more credit on async read/write code paths
	gfs2: fix use-after-free in trans_drain
	cpufreq: blacklist Arm Vexpress platforms in cpufreq-dt-platdev
	gpiolib: acpi: Add missing IRQF_ONESHOT
	nfs: fix PNFS_FLEXFILE_LAYOUT Kconfig default
	NFS: Correct size calculation for create reply length
	net: hisilicon: hns: fix error return code of hns_nic_clear_all_rx_fetch()
	net: wan: fix error return code of uhdlc_init()
	net: davicom: Use platform_get_irq_optional()
	net: enetc: set MAC RX FIFO to recommended value
	atm: uPD98402: fix incorrect allocation
	atm: idt77252: fix null-ptr-dereference
	cifs: change noisy error message to FYI
	irqchip/ingenic: Add support for the JZ4760
	kbuild: add image_name to no-sync-config-targets
	kbuild: dummy-tools: fix inverted tests for gcc
	umem: fix error return code in mm_pci_probe()
	sparc64: Fix opcode filtering in handling of no fault loads
	habanalabs: Call put_pid() when releasing control device
	staging: rtl8192e: fix kconfig dependency on CRYPTO
	u64_stats,lockdep: Fix u64_stats_init() vs lockdep
	kselftest: arm64: Fix exit code of sve-ptrace
	regulator: qcom-rpmh: Correct the pmic5_hfsmps515 buck
	block: Fix REQ_OP_ZONE_RESET_ALL handling
	drm/amd/display: Revert dram_clock_change_latency for DCN2.1
	drm/amdgpu: fb BO should be ttm_bo_type_device
	drm/radeon: fix AGP dependency
	nvme: simplify error logic in nvme_validate_ns()
	nvme: add NVME_REQ_CANCELLED flag in nvme_cancel_request()
	nvme-fc: set NVME_REQ_CANCELLED in nvme_fc_terminate_exchange()
	nvme-fc: return NVME_SC_HOST_ABORTED_CMD when a command has been aborted
	nvme-core: check ctrl css before setting up zns
	nvme-rdma: Fix a use after free in nvmet_rdma_write_data_done
	nvme-pci: add the DISABLE_WRITE_ZEROES quirk for a Samsung PM1725a
	nfs: we don't support removing system.nfs4_acl
	block: Suppress uevent for hidden device when removed
	mm/fork: clear PASID for new mm
	ia64: fix ia64_syscall_get_set_arguments() for break-based syscalls
	ia64: fix ptrace(PTRACE_SYSCALL_INFO_EXIT) sign
	static_call: Pull some static_call declarations to the type headers
	static_call: Allow module use without exposing static_call_key
	static_call: Fix the module key fixup
	static_call: Fix static_call_set_init()
	KVM: x86: Protect userspace MSR filter with SRCU, and set atomically-ish
	btrfs: fix sleep while in non-sleep context during qgroup removal
	selinux: don't log MAC_POLICY_LOAD record on failed policy load
	selinux: fix variable scope issue in live sidtab conversion
	netsec: restore phy power state after controller reset
	platform/x86: intel-vbtn: Stop reporting SW_DOCK events
	psample: Fix user API breakage
	z3fold: prevent reclaim/free race for headless pages
	squashfs: fix inode lookup sanity checks
	squashfs: fix xattr id and id lookup sanity checks
	hugetlb_cgroup: fix imbalanced css_get and css_put pair for shared mappings
	kasan: fix per-page tags for non-page_alloc pages
	gcov: fix clang-11+ support
	ACPI: video: Add missing callback back for Sony VPCEH3U1E
	ACPICA: Always create namespace nodes using acpi_ns_create_node()
	arm64: stacktrace: don't trace arch_stack_walk()
	arm64: dts: ls1046a: mark crypto engine dma coherent
	arm64: dts: ls1012a: mark crypto engine dma coherent
	arm64: dts: ls1043a: mark crypto engine dma coherent
	ARM: dts: at91: sam9x60: fix mux-mask for PA7 so it can be set to A, B and C
	ARM: dts: at91: sam9x60: fix mux-mask to match product's datasheet
	ARM: dts: at91-sama5d27_som1: fix phy address to 7
	integrity: double check iint_cache was initialized
	drm/etnaviv: Use FOLL_FORCE for userptr
	drm/amd/pm: workaround for audio noise issue
	drm/amdgpu/display: restore AUX_DPHY_TX_CONTROL for DCN2.x
	drm/amdgpu: Add additional Sienna Cichlid PCI ID
	drm/i915: Fix the GT fence revocation runtime PM logic
	dm verity: fix DM_VERITY_OPTS_MAX value
	dm ioctl: fix out of bounds array access when no devices
	bus: omap_l3_noc: mark l3 irqs as IRQF_NO_THREAD
	ARM: OMAP2+: Fix smartreflex init regression after dropping legacy data
	soc: ti: omap-prm: Fix occasional abort on reset deassert for dra7 iva
	veth: Store queue_mapping independently of XDP prog presence
	bpf: Change inode_storage's lookup_elem return value from NULL to -EBADF
	libbpf: Fix INSTALL flag order
	net/mlx5e: RX, Mind the MPWQE gaps when calculating offsets
	net/mlx5e: When changing XDP program without reset, take refs for XSK RQs
	net/mlx5e: Don't match on Geneve options in case option masks are all zero
	ipv6: fix suspecious RCU usage warning
	drop_monitor: Perform cleanup upon probe registration failure
	macvlan: macvlan_count_rx() needs to be aware of preemption
	net: sched: validate stab values
	net: dsa: bcm_sf2: Qualify phydev->dev_flags based on port
	igc: reinit_locked() should be called with rtnl_lock
	igc: Fix Pause Frame Advertising
	igc: Fix Supported Pause Frame Link Setting
	igc: Fix igc_ptp_rx_pktstamp()
	e1000e: add rtnl_lock() to e1000_reset_task
	e1000e: Fix error handling in e1000_set_d0_lplu_state_82571
	net/qlcnic: Fix a use after free in qlcnic_83xx_get_minidump_template
	net: phy: broadcom: Add power down exit reset state delay
	ftgmac100: Restart MAC HW once
	clk: qcom: gcc-sc7180: Use floor ops for the correct sdcc1 clk
	net: ipa: terminate message handler arrays
	net: qrtr: fix a kernel-infoleak in qrtr_recvmsg()
	flow_dissector: fix byteorder of dissected ICMP ID
	selftests/bpf: Set gopt opt_class to 0 if get tunnel opt failed
	netfilter: ctnetlink: fix dump of the expect mask attribute
	net: hdlc_x25: Prevent racing between "x25_close" and "x25_xmit"/"x25_rx"
	net: phylink: Fix phylink_err() function name error in phylink_major_config
	tipc: better validate user input in tipc_nl_retrieve_key()
	tcp: relookup sock for RST+ACK packets handled by obsolete req sock
	can: isotp: isotp_setsockopt(): only allow to set low level TX flags for CAN-FD
	can: isotp: TX-path: ensure that CAN frame flags are initialized
	can: peak_usb: add forgotten supported devices
	can: flexcan: flexcan_chip_freeze(): fix chip freeze for missing bitrate
	can: kvaser_pciefd: Always disable bus load reporting
	can: c_can_pci: c_can_pci_remove(): fix use-after-free
	can: c_can: move runtime PM enable/disable to c_can_platform
	can: m_can: m_can_do_rx_poll(): fix extraneous msg loss warning
	can: m_can: m_can_rx_peripheral(): fix RX being blocked by errors
	mac80211: fix rate mask reset
	mac80211: Allow HE operation to be longer than expected.
	selftests/net: fix warnings on reuseaddr_ports_exhausted
	nfp: flower: fix unsupported pre_tunnel flows
	nfp: flower: add ipv6 bit to pre_tunnel control message
	nfp: flower: fix pre_tun mask id allocation
	ftrace: Fix modify_ftrace_direct.
	drm/msm/dsi: fix check-before-set in the 7nm dsi_pll code
	ionic: linearize tso skb with too many frags
	net/sched: cls_flower: fix only mask bit check in the validate_ct_state
	netfilter: nftables: report EOPNOTSUPP on unsupported flowtable flags
	netfilter: nftables: allow to update flowtable flags
	netfilter: flowtable: Make sure GC works periodically in idle system
	libbpf: Fix error path in bpf_object__elf_init()
	libbpf: Use SOCK_CLOEXEC when opening the netlink socket
	ARM: dts: imx6ull: fix ubi filesystem mount failed
	ipv6: weaken the v4mapped source check
	octeontx2-af: Formatting debugfs entry rsrc_alloc.
	octeontx2-af: Modify default KEX profile to extract TX packet fields
	octeontx2-af: Remove TOS field from MKEX TX
	octeontx2-af: Fix irq free in rvu teardown
	octeontx2-pf: Clear RSS enable flag on interace down
	octeontx2-af: fix infinite loop in unmapping NPC counter
	net: check all name nodes in __dev_alloc_name
	net: cdc-phonet: fix data-interface release on probe failure
	igb: check timestamp validity
	r8152: limit the RX buffer size of RTL8153A for USB 2.0
	net: stmmac: dwmac-sun8i: Provide TX and RX fifo sizes
	selinux: vsock: Set SID for socket returned by accept()
	selftests: forwarding: vxlan_bridge_1d: Fix vxlan ecn decapsulate value
	libbpf: Fix BTF dump of pointer-to-array-of-struct
	bpf: Fix umd memory leak in copy_process()
	can: isotp: tx-path: zero initialize outgoing CAN frames
	drm/msm: fix shutdown hook in case GPU components failed to bind
	drm/msm: Fix suspend/resume on i.MX5
	arm64: kdump: update ppos when reading elfcorehdr
	PM: runtime: Defer suspending suppliers
	net/mlx5: Add back multicast stats for uplink representor
	net/mlx5e: Allow to match on MPLS parameters only for MPLS over UDP
	net/mlx5e: Offload tuple rewrite for non-CT flows
	net/mlx5e: Fix error path for ethtool set-priv-flag
	PM: EM: postpone creating the debugfs dir till fs_initcall
	net: bridge: don't notify switchdev for local FDB addresses
	octeontx2-af: Fix memory leak of object buf
	xen/x86: make XEN_BALLOON_MEMORY_HOTPLUG_LIMIT depend on MEMORY_HOTPLUG
	RDMA/cxgb4: Fix adapter LE hash errors while destroying ipv6 listening server
	bpf: Don't do bpf_cgroup_storage_set() for kuprobe/tp programs
	net: Consolidate common blackhole dst ops
	net, bpf: Fix ip6ip6 crash with collect_md populated skbs
	igb: avoid premature Rx buffer reuse
	net: axienet: Properly handle PCS/PMA PHY for 1000BaseX mode
	net: axienet: Fix probe error cleanup
	net: phy: introduce phydev->port
	net: phy: broadcom: Avoid forward for bcm54xx_config_clock_delay()
	net: phy: broadcom: Set proper 1000BaseX/SGMII interface mode for BCM54616S
	net: phy: broadcom: Fix RGMII delays for BCM50160 and BCM50610M
	Revert "netfilter: x_tables: Switch synchronization to RCU"
	netfilter: x_tables: Use correct memory barriers.
	dm table: Fix zoned model check and zone sectors check
	mm/mmu_notifiers: ensure range_end() is paired with range_start()
	Revert "netfilter: x_tables: Update remaining dereference to RCU"
	ACPI: scan: Rearrange memory allocation in acpi_device_add()
	ACPI: scan: Use unique number for instance_no
	perf auxtrace: Fix auxtrace queue conflict
	perf synthetic events: Avoid write of uninitialized memory when generating PERF_RECORD_MMAP* records
	io_uring: fix provide_buffers sign extension
	block: recalculate segment count for multi-segment discards correctly
	scsi: Revert "qla2xxx: Make sure that aborted commands are freed"
	scsi: qedi: Fix error return code of qedi_alloc_global_queues()
	scsi: mpt3sas: Fix error return code of mpt3sas_base_attach()
	smb3: fix cached file size problems in duplicate extents (reflink)
	cifs: Adjust key sizes and key generation routines for AES256 encryption
	locking/mutex: Fix non debug version of mutex_lock_io_nested()
	x86/mem_encrypt: Correct physical address calculation in __set_clr_pte_enc()
	mm/memcg: fix 5.10 backport of splitting page memcg
	fs/cachefiles: Remove wait_bit_key layout dependency
	ch_ktls: fix enum-conversion warning
	can: dev: Move device back to init netns on owning netns delete
	r8169: fix DMA being used after buffer free if WoL is enabled
	net: dsa: b53: VLAN filtering is global to all users
	mac80211: fix double free in ibss_leave
	ext4: add reclaim checks to xattr code
	fs/ext4: fix integer overflow in s_log_groups_per_flex
	Revert "xen: fix p2m size in dom0 for disabled memory hotplug case"
	Revert "net: bonding: fix error return code of bond_neigh_init()"
	nvme: fix the nsid value to print in nvme_validate_or_alloc_ns
	can: peak_usb: Revert "can: peak_usb: add forgotten supported devices"
	xen-blkback: don't leak persistent grants from xen_blkbk_map()
	Linux 5.10.27

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I7eafe976fd6bf33db6db4adb8ebf2ff087294a23
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2021-03-30 17:03:59 +02:00

270 changed files with 2578 additions and 984 deletions


@@ -4815,8 +4815,10 @@ If an MSR access is not permitted through the filtering, it generates a
 allows user space to deflect and potentially handle various MSR accesses
 into user space.
 
-If a vCPU is in running state while this ioctl is invoked, the vCPU may
-experience inconsistent filtering behavior on MSR accesses.
+Note, invoking this ioctl with a vCPU is running is inherently racy. However,
+KVM does guarantee that vCPUs will see either the previous filter or the new
+filter, e.g. MSRs with identical settings in both the old and new filter will
+have deterministic behavior.
 
 5. The kvm_run structure
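For readers unfamiliar with the interface documented above, here is a minimal
userspace sketch of installing a filter via KVM_X86_SET_MSR_FILTER, the ioctl
whose update semantics the new note describes. The helper name, the choice of
MSR, and the omission of error handling are illustrative assumptions, not part
of the patch.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Deny guest reads and writes of one MSR; everything else stays allowed. */
static int deny_one_msr(int vm_fd, __u32 msr)
{
	__u8 bitmap[1] = { 0 };		/* bit clear = access filtered */
	struct kvm_msr_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.flags = KVM_MSR_FILTER_DEFAULT_ALLOW;
	filter.ranges[0].flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE;
	filter.ranges[0].base = msr;
	filter.ranges[0].nmsrs = 1;
	filter.ranges[0].bitmap = bitmap;

	/* Racy against running vCPUs exactly as the note above explains. */
	return ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
}

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);

	return deny_one_msr(vm, 0xc0000103 /* MSR_TSC_AUX, arbitrary */);
}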


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 26
+SUBLEVEL = 27
 EXTRAVERSION =
 NAME = Dare mighty things
@@ -265,7 +265,8 @@ no-dot-config-targets := $(clean-targets) \
 			  $(version_h) headers headers_% archheaders archscripts \
 			  %asm-generic kernelversion %src-pkg dt_binding_check \
 			  outputmakefile
-no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease
+no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease \
+			  image_name
 single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/
 config-build :=


@@ -334,14 +334,6 @@
 };
 
 &pinctrl {
-	atmel,mux-mask = <
-			 /*	A	 B	  C	*/
-			 0xFFFFFE7F 0xC0E0397F 0xEF00019D	/* pioA */
-			 0x03FFFFFF 0x02FC7E68 0x00780000	/* pioB */
-			 0xffffffff 0xF83FFFFF 0xB800F3FC	/* pioC */
-			 0x003FFFFF 0x003F8000 0x00000000	/* pioD */
-			 >;
-
 	adc {
 		pinctrl_adc_default: adc_default {
 			atmel,pins = <AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;


@@ -84,8 +84,8 @@
 	pinctrl-0 = <&pinctrl_macb0_default>;
 	phy-mode = "rmii";
 
-	ethernet-phy@0 {
-		reg = <0x0>;
+	ethernet-phy@7 {
+		reg = <0x7>;
 		interrupt-parent = <&pioA>;
 		interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
 		pinctrl-names = "default";


@@ -14,5 +14,6 @@
 };
 
 &gpmi {
+	fsl,use-minimum-ecc;
 	status = "okay";
 };


@@ -606,6 +606,15 @@
 				compatible = "microchip,sam9x60-pinctrl", "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
 				ranges = <0xfffff400 0xfffff400 0x800>;
 
+				/* mux-mask corresponding to sam9x60 SoC in TFBGA228L package */
+				atmel,mux-mask = <
+						 /*	A	B	C	*/
+						 0xffffffff 0xffe03fff 0xef00019d	/* pioA */
+						 0x03ffffff 0x02fc7e7f 0x00780000	/* pioB */
+						 0xffffffff 0xffffffff 0xf83fffff	/* pioC */
+						 0x003fffff 0x003f8000 0x00000000	/* pioD */
+						 >;
+
 				pioA: gpio@fffff400 {
 					compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
 					reg = <0xfffff400 0x200>;


@@ -88,34 +88,26 @@ static void __init sr_set_nvalues(struct omap_volt_data *volt_data,
 extern struct omap_sr_data omap_sr_pdata[];
 
-static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+static int __init sr_init_by_name(const char *name, const char *voltdm)
 {
 	struct omap_sr_data *sr_data = NULL;
 	struct omap_volt_data *volt_data;
-	struct omap_smartreflex_dev_attr *sr_dev_attr;
 	static int i;
 
-	if (!strncmp(oh->name, "smartreflex_mpu_iva", 20) ||
-	    !strncmp(oh->name, "smartreflex_mpu", 16))
+	if (!strncmp(name, "smartreflex_mpu_iva", 20) ||
+	    !strncmp(name, "smartreflex_mpu", 16))
 		sr_data = &omap_sr_pdata[OMAP_SR_MPU];
-	else if (!strncmp(oh->name, "smartreflex_core", 17))
+	else if (!strncmp(name, "smartreflex_core", 17))
 		sr_data = &omap_sr_pdata[OMAP_SR_CORE];
-	else if (!strncmp(oh->name, "smartreflex_iva", 16))
+	else if (!strncmp(name, "smartreflex_iva", 16))
 		sr_data = &omap_sr_pdata[OMAP_SR_IVA];
 
 	if (!sr_data) {
-		pr_err("%s: Unknown instance %s\n", __func__, oh->name);
+		pr_err("%s: Unknown instance %s\n", __func__, name);
 		return -EINVAL;
 	}
 
-	sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
-	if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
-		pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
-		       __func__, oh->name);
-		goto exit;
-	}
-
-	sr_data->name = oh->name;
+	sr_data->name = name;
 	if (cpu_is_omap343x())
 		sr_data->ip_type = 1;
 	else
@@ -136,10 +128,10 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
 		}
 	}
 
-	sr_data->voltdm = voltdm_lookup(sr_dev_attr->sensor_voltdm_name);
+	sr_data->voltdm = voltdm_lookup(voltdm);
 	if (!sr_data->voltdm) {
 		pr_err("%s: Unable to get voltage domain pointer for VDD %s\n",
-			__func__, sr_dev_attr->sensor_voltdm_name);
+			__func__, voltdm);
 		goto exit;
 	}
 
@@ -160,6 +152,20 @@ exit:
 	return 0;
 }
 
+static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+{
+	struct omap_smartreflex_dev_attr *sr_dev_attr;
+
+	sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
+	if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
+		pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
+		       __func__, oh->name);
+		return 0;
+	}
+
+	return sr_init_by_name(oh->name, sr_dev_attr->sensor_voltdm_name);
+}
+
 /*
  * API to be called from board files to enable smartreflex
  * autocompensation at init.
@@ -169,7 +175,42 @@ void __init omap_enable_smartreflex_on_init(void)
 	sr_enable_on_init = true;
 }
 
+static const char * const omap4_sr_instances[] = {
+	"mpu",
+	"iva",
+	"core",
+};
+
+static const char * const dra7_sr_instances[] = {
+	"mpu",
+	"core",
+};
+
 int __init omap_devinit_smartreflex(void)
 {
+	const char * const *sr_inst;
+	int i, nr_sr = 0;
+
+	if (soc_is_omap44xx()) {
+		sr_inst = omap4_sr_instances;
+		nr_sr = ARRAY_SIZE(omap4_sr_instances);
+	} else if (soc_is_dra7xx()) {
+		sr_inst = dra7_sr_instances;
+		nr_sr = ARRAY_SIZE(dra7_sr_instances);
+	}
+
+	if (nr_sr) {
+		const char *name, *voltdm;
+
+		for (i = 0; i < nr_sr; i++) {
+			name = kasprintf(GFP_KERNEL, "smartreflex_%s", sr_inst[i]);
+			voltdm = sr_inst[i];
+			sr_init_by_name(name, voltdm);
+		}
+
+		return 0;
+	}
+
 	return omap_hwmod_for_each_by_class("smartreflex", sr_dev_init, NULL);
 }


@@ -192,6 +192,7 @@
 			ranges = <0x0 0x00 0x1700000 0x100000>;
 			reg = <0x00 0x1700000 0x0 0x100000>;
 			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+			dma-coherent;
 
 			sec_jr0: jr@10000 {
 				compatible = "fsl,sec-v5.4-job-ring",


@@ -322,6 +322,7 @@
 			ranges = <0x0 0x00 0x1700000 0x100000>;
 			reg = <0x00 0x1700000 0x0 0x100000>;
 			interrupts = <0 75 0x4>;
+			dma-coherent;
 
 			sec_jr0: jr@10000 {
 				compatible = "fsl,sec-v5.4-job-ring",


@@ -325,6 +325,7 @@
 			ranges = <0x0 0x00 0x1700000 0x100000>;
 			reg = <0x00 0x1700000 0x0 0x100000>;
 			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+			dma-coherent;
 
 			sec_jr0: jr@10000 {
 				compatible = "fsl,sec-v5.4-job-ring",


@@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
 	memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+	*ppos += count;
+
 	return count;
 }


@@ -200,8 +200,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 #ifdef CONFIG_STACKTRACE
 
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
-		     struct task_struct *task, struct pt_regs *regs)
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+			      void *cookie, struct task_struct *task,
+			      struct pt_regs *regs)
 {
 	struct stackframe frame;
 
@@ -209,8 +210,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
 		start_backtrace(&frame, regs->regs[29], regs->pc);
 	else if (task == current)
 		start_backtrace(&frame,
-				(unsigned long)__builtin_frame_address(0),
-				(unsigned long)arch_stack_walk);
+				(unsigned long)__builtin_frame_address(1),
+				(unsigned long)__builtin_return_address(0));
 	else
 		start_backtrace(&frame, thread_saved_fp(task),
 				thread_saved_pc(task));


@@ -32,7 +32,7 @@ static inline void syscall_rollback(struct task_struct *task,
 static inline long syscall_get_error(struct task_struct *task,
 				     struct pt_regs *regs)
 {
-	return regs->r10 == -1 ? regs->r8:0;
+	return regs->r10 == -1 ? -regs->r8:0;
 }
 
 static inline long syscall_get_return_value(struct task_struct *task,


@@ -2010,27 +2010,39 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
 {
 	struct syscall_get_set_args *args = data;
 	struct pt_regs *pt = args->regs;
-	unsigned long *krbs, cfm, ndirty;
+	unsigned long *krbs, cfm, ndirty, nlocals, nouts;
 	int i, count;
 
 	if (unw_unwind_to_user(info) < 0)
 		return;
 
+	/*
+	 * We get here via a few paths:
+	 * - break instruction: cfm is shared with caller.
+	 *   syscall args are in out= regs, locals are non-empty.
+	 * - epsinstruction: cfm is set by br.call
+	 *   locals don't exist.
+	 *
+	 * For both cases argguments are reachable in cfm.sof - cfm.sol.
+	 * CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
+	 */
 	cfm = pt->cr_ifs;
+	nlocals = (cfm >> 7) & 0x7f; /* aka sol */
+	nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */
 	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
 	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
 
 	count = 0;
 	if (in_syscall(pt))
-		count = min_t(int, args->n, cfm & 0x7f);
+		count = min_t(int, args->n, nouts);
 
+	/* Iterate over outs. */
 	for (i = 0; i < count; i++) {
+		int j = ndirty + nlocals + i + args->i;
+
 		if (args->rw)
-			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
-				args->args[i];
+			*ia64_rse_skip_regs(krbs, j) = args->args[i];
 		else
-			args->args[i] = *ia64_rse_skip_regs(krbs,
-				ndirty + i + args->i);
+			args->args[i] = *ia64_rse_skip_regs(krbs, j);
 	}
 
 	if (!args->rw) {
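A worked decode of the CFM layout spelled out in the new comment, using a
made-up cfm value. This is illustration only, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned long cfm = 0x2A6;		/* hypothetical cr.ifs value */
	unsigned long sof = cfm & 0x7f;		/* size of frame             */
	unsigned long sol = (cfm >> 7) & 0x7f;	/* size of locals            */
	unsigned long nouts = sof - sol;	/* outgoing syscall args     */

	/* 0x2A6: sof = 38, sol = 5 -> 33 outs */
	printf("sof=%lu sol=%lu outs=%lu\n", sof, sol, nouts);
	return 0;
}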


@@ -53,8 +53,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
 #define mfdcr(rn)						\
 	({unsigned int rval;					\
 	if (__builtin_constant_p(rn) && rn < 1024)		\
-		asm volatile("mfdcr %0," __stringify(rn)	\
-		              : "=r" (rval));			\
+		asm volatile("mfdcr %0, %1" : "=r" (rval)	\
+			      : "n" (rn));			\
 	else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))	\
 		rval = mfdcrx(rn);				\
 	else							\
@@ -64,8 +64,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
 #define mtdcr(rn, v)						\
 do {								\
 	if (__builtin_constant_p(rn) && rn < 1024)		\
-		asm volatile("mtdcr " __stringify(rn) ",%0"	\
-			      : : "r" (v));			\
+		asm volatile("mtdcr %0, %1"			\
+			      : : "n" (rn), "r" (v));		\
 	else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))	\
 		mtdcrx(rn, v);					\
 	else							\
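The repaired macros lean on the "n" inline-asm constraint, which demands a
compile-time integer constant and lets the assembler see the literal register
number. A standalone x86-64 sketch of the same idiom; the mov instruction and
the EMIT_CONST name are illustrative assumptions, not from the patch:

#include <stdio.h>

#define EMIT_CONST(k)						\
	({							\
		long v;						\
		/* %c1 prints the "n" operand as a bare constant */ \
		asm volatile("mov $%c1, %0"			\
			     : "=r" (v) : "n" (k));		\
		v;						\
	})

int main(void)
{
	printf("%ld\n", EMIT_CONST(42));	/* 42 */
	return 0;
}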


@@ -275,14 +275,13 @@ bool is_no_fault_exception(struct pt_regs *regs)
 		asi = (regs->tstate >> 24);	/* saved %asi       */
 	else
 		asi = (insn >> 5);		/* immediate asi    */
-	if ((asi & 0xf2) == ASI_PNF) {
-		if (insn & 0x1000000) {        /* op3[5:4]=3       */
-			handle_ldf_stq(insn, regs);
-			return true;
-		} else if (insn & 0x200000) { /* op3[2], stores */
+	if ((asi & 0xf6) == ASI_PNF) {
+		if (insn & 0x200000)        /* op3[2], stores */
 			return false;
-		}
-		handle_ld_nf(insn, regs);
+		if (insn & 0x1000000)       /* op3[5:4]=3 (fp)  */
+			handle_ldf_stq(insn, regs);
+		else
+			handle_ld_nf(insn, regs);
 		return true;
 	}
 }


@@ -890,6 +890,12 @@ enum kvm_irqchip_mode {
 	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
 };
 
+struct kvm_x86_msr_filter {
+	u8 count;
+	bool default_allow:1;
+	struct msr_bitmap_range ranges[16];
+};
+
 #define APICV_INHIBIT_REASON_DISABLE    0
 #define APICV_INHIBIT_REASON_HYPERV     1
 #define APICV_INHIBIT_REASON_NESTED     2
@@ -985,14 +991,12 @@ struct kvm_arch {
 	bool guest_can_read_msr_platform_info;
 	bool exception_payload_enabled;
 
 	bool bus_lock_detection_enabled;
 
 	/* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
 	u32 user_space_msr_mask;
-
-	struct {
-		u8 count;
-		bool default_allow:1;
-		struct msr_bitmap_range ranges[16];
-	} msr_filter;
+	struct kvm_x86_msr_filter __rcu *msr_filter;
 
 	struct kvm_pmu_event_filter *pmu_event_filter;
 	struct task_struct *nx_lpage_recovery_thread;


@@ -37,4 +37,11 @@
 #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)			\
 	__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop")
 
+#define ARCH_ADD_TRAMP_KEY(name)					\
+	asm(".pushsection .static_call_tramp_key, \"a\"	\n"	\
+	    ".long " STATIC_CALL_TRAMP_STR(name) " - .		\n"	\
+	    ".long " STATIC_CALL_KEY_STR(name) " - .		\n"	\
+	    ".popsection					\n")
+
 #endif /* _ASM_STATIC_CALL_H */


@@ -86,18 +86,6 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 }
 #endif
 
-/*
- * The maximum amount of extra memory compared to the base size.  The
- * main scaling factor is the size of struct page.  At extreme ratios
- * of base:extra, all the base memory can be filled with page
- * structures for the extra memory, leaving no space for anything
- * else.
- *
- * 10x seems like a reasonable balance between scaling flexibility and
- * leaving a practically usable system.
- */
-#define XEN_EXTRA_MEM_RATIO	(10)
-
 /*
  * Helper functions to write or read unsigned long values to/from
  * memory, when the access may fault.


@@ -1505,35 +1505,44 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
 {
+	struct kvm_x86_msr_filter *msr_filter;
+	struct msr_bitmap_range *ranges;
 	struct kvm *kvm = vcpu->kvm;
-	struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;
-	u32 count = kvm->arch.msr_filter.count;
-	u32 i;
-	bool r = kvm->arch.msr_filter.default_allow;
+	bool allowed;
 	int idx;
+	u32 i;
 
-	/* MSR filtering not set up or x2APIC enabled, allow everything */
-	if (!count || (index >= 0x800 && index <= 0x8ff))
+	/* x2APIC MSRs do not support filtering. */
+	if (index >= 0x800 && index <= 0x8ff)
 		return true;
 
-	/* Prevent collision with set_msr_filter */
 	idx = srcu_read_lock(&kvm->srcu);
 
-	for (i = 0; i < count; i++) {
+	msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
+	if (!msr_filter) {
+		allowed = true;
+		goto out;
+	}
+
+	allowed = msr_filter->default_allow;
+	ranges = msr_filter->ranges;
+
+	for (i = 0; i < msr_filter->count; i++) {
 		u32 start = ranges[i].base;
 		u32 end = start + ranges[i].nmsrs;
 		u32 flags = ranges[i].flags;
 		unsigned long *bitmap = ranges[i].bitmap;
 
 		if ((index >= start) && (index < end) && (flags & type)) {
-			r = !!test_bit(index - start, bitmap);
+			allowed = !!test_bit(index - start, bitmap);
 			break;
 		}
 	}
 
+out:
 	srcu_read_unlock(&kvm->srcu, idx);
 
-	return r;
+	return allowed;
 }
 EXPORT_SYMBOL_GPL(kvm_msr_allowed);
 
@@ -5291,25 +5300,34 @@ split_irqchip_unlock:
 	return r;
 }
 
-static void kvm_clear_msr_filter(struct kvm *kvm)
+static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
 {
-	u32 i;
-	u32 count = kvm->arch.msr_filter.count;
-	struct msr_bitmap_range ranges[16];
+	struct kvm_x86_msr_filter *msr_filter;
 
-	mutex_lock(&kvm->lock);
-	kvm->arch.msr_filter.count = 0;
-	memcpy(ranges, kvm->arch.msr_filter.ranges, count * sizeof(ranges[0]));
-	mutex_unlock(&kvm->lock);
-	synchronize_srcu(&kvm->srcu);
+	msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT);
+	if (!msr_filter)
+		return NULL;
 
-	for (i = 0; i < count; i++)
-		kfree(ranges[i].bitmap);
+	msr_filter->default_allow = default_allow;
+	return msr_filter;
 }
 
-static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user_range)
+static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
 {
-	struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;
+	u32 i;
+
+	if (!msr_filter)
+		return;
+
+	for (i = 0; i < msr_filter->count; i++)
+		kfree(msr_filter->ranges[i].bitmap);
+
+	kfree(msr_filter);
+}
+
+static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
+			      struct kvm_msr_filter_range *user_range)
+{
 	struct msr_bitmap_range range;
 	unsigned long *bitmap = NULL;
 	size_t bitmap_size;
@@ -5343,11 +5361,9 @@ static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user
 		goto err;
 	}
 
-	/* Everything ok, add this range identifier to our global pool */
-	ranges[kvm->arch.msr_filter.count] = range;
-	/* Make sure we filled the array before we tell anyone to walk it */
-	smp_wmb();
-	kvm->arch.msr_filter.count++;
+	/* Everything ok, add this range identifier. */
+	msr_filter->ranges[msr_filter->count] = range;
+	msr_filter->count++;
 
 	return 0;
 err:
@@ -5358,10 +5374,11 @@ err:
 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_msr_filter __user *user_msr_filter = argp;
+	struct kvm_x86_msr_filter *new_filter, *old_filter;
 	struct kvm_msr_filter filter;
 	bool default_allow;
-	int r = 0;
 	bool empty = true;
+	int r = 0;
 	u32 i;
 
 	if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
@@ -5374,25 +5391,32 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
 	if (empty && !default_allow)
 		return -EINVAL;
 
-	kvm_clear_msr_filter(kvm);
+	new_filter = kvm_alloc_msr_filter(default_allow);
+	if (!new_filter)
+		return -ENOMEM;
 
-	kvm->arch.msr_filter.default_allow = default_allow;
-
-	/*
-	 * Protect from concurrent calls to this function that could trigger
-	 * a TOCTOU violation on kvm->arch.msr_filter.count.
-	 */
-	mutex_lock(&kvm->lock);
 	for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
-		r = kvm_add_msr_filter(kvm, &filter.ranges[i]);
-		if (r)
-			break;
+		r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
+		if (r) {
+			kvm_free_msr_filter(new_filter);
+			return r;
+		}
 	}
 
+	mutex_lock(&kvm->lock);
+
+	/* The per-VM filter is protected by kvm->lock... */
+	old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1);
+	rcu_assign_pointer(kvm->arch.msr_filter, new_filter);
+
+	synchronize_srcu(&kvm->srcu);
+	kvm_free_msr_filter(old_filter);
+
 	kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
 	mutex_unlock(&kvm->lock);
 
-	return r;
+	return 0;
 }
 
 long kvm_arch_vm_ioctl(struct file *filp,
@@ -10423,8 +10447,6 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm)
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
-	u32 i;
-
 	if (current->mm == kvm->mm) {
 		/*
 		 * Free memory regions allocated on behalf of userspace,
@@ -10441,8 +10463,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	}
 	if (kvm_x86_ops.vm_destroy)
 		kvm_x86_ops.vm_destroy(kvm);
-	for (i = 0; i < kvm->arch.msr_filter.count; i++)
-		kfree(kvm->arch.msr_filter.ranges[i].bitmap);
+	kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
 	kvm_pic_destroy(kvm);
 	kvm_ioapic_destroy(kvm);
 	kvm_free_vcpus(kvm);
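The shape of this fix, reduced to a userspace sketch: build the replacement
filter off to the side, publish it with a single pointer swap, and only then
free the old one. C11 atomics stand in for RCU pointer publication and the
synchronize_srcu() grace period is elided, so this is an analogy for the
object lifecycle, not the kernel's synchronization:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct filter { int default_allow; };

static _Atomic(struct filter *) cur_filter;

static void set_filter(int default_allow)
{
	struct filter *new = malloc(sizeof(*new)), *old;

	new->default_allow = default_allow;
	old = atomic_exchange(&cur_filter, new);	/* publish atomically */
	/* here the kernel waits for readers via synchronize_srcu() */
	free(old);
}

int main(void)
{
	set_filter(1);
	set_filter(0);
	printf("%d\n", atomic_load(&cur_filter)->default_allow);	/* 0 */
	free(cur_filter);
	return 0;
}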


@@ -231,7 +231,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 	if (pgprot_val(old_prot) == pgprot_val(new_prot))
 		return;
 
-	pa = pfn << page_level_shift(level);
+	pa = pfn << PAGE_SHIFT;
 	size = page_level_size(level);
 
 	/*

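Why the one-liner above matters, as a worked example with illustrative values:
a pfn counts 4 KiB frames regardless of the mapping size, so shifting it by
the page-level shift of a 2 MiB mapping lands 512 times too high.

#include <stdio.h>

int main(void)
{
	unsigned long pfn = 0x100000;	/* frame at 4 GiB */

	/* correct: pa = pfn << PAGE_SHIFT */
	printf("pa  = %#lx\n", pfn << 12);	/* 0x100000000   */
	/* buggy for a 2 MiB mapping: pa = pfn << page_level_shift(level) */
	printf("bad = %#lx\n", pfn << 21);	/* 0x20000000000 */
	return 0;
}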

@@ -98,8 +98,8 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
 unsigned long xen_max_p2m_pfn __read_mostly;
 EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
 
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
-#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
+#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
 #else
 #define P2M_LIMIT 0
 #endif
@@ -416,9 +416,6 @@ void __init xen_vmalloc_p2m_tree(void)
 	xen_p2m_last_pfn = xen_max_p2m_pfn;
 
 	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
-	if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC))
-		p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO;
-
 	vm.flags = VM_ALLOC;
 	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
 			PMD_SIZE * PMDS_PER_MID_PAGE);


@@ -59,6 +59,18 @@ static struct {
 } xen_remap_buf __initdata __aligned(PAGE_SIZE);
 static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
 
+/*
+ * The maximum amount of extra memory compared to the base size.  The
+ * main scaling factor is the size of struct page.  At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ *
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define EXTRA_MEM_RATIO		(10)
+
 static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
 
 static void __init xen_parse_512gb(void)
@@ -778,13 +790,13 @@ char * __init xen_memory_setup(void)
 		extra_pages += max_pages - max_pfn;
 
 	/*
-	 * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO
+	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
 	 * factor the base size.
 	 *
 	 * Make sure we have no memory above max_pages, as this area
 	 * isn't handled by the p2m management.
 	 */
-	extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
 			   extra_pages, max_pages - max_pfn);
 	i = 0;
 	addr = xen_e820_table.entries[0].addr;
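A worked instance of the clamp, with hypothetical numbers (not from the
patch): for a 1 GiB initial allocation and a 2 GiB host ceiling, the host
ceiling, not the 10x ratio, bounds the extra memory.

#include <stdio.h>

#define EXTRA_MEM_RATIO 10
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long max_pfn = 262144;		/* 1 GiB in 4 KiB frames   */
	unsigned long max_pages = 524288;	/* 2 GiB host ceiling      */
	unsigned long maxmem_pfn = 134217728;	/* 512 GiB / 4 KiB         */
	unsigned long extra = max_pages - max_pfn;		/* 262144  */
	unsigned long cap = EXTRA_MEM_RATIO * MIN(max_pfn, maxmem_pfn);

	/* min3(cap, extra, max_pages - max_pfn) */
	extra = MIN(MIN(cap, extra), max_pages - max_pfn);
	printf("%lu\n", extra);	/* 262144: the host limit wins over 10x */
	return 0;
}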


@@ -109,6 +109,7 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
 	lockdep_assert_held(&blkg->q->queue_lock);
 
+	memset(sum, 0, sizeof(*sum));
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
 		struct blkg_rwstat *rwstat;
@@ -122,7 +123,7 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
 		rwstat = (void *)pos_blkg + off;
 
 		for (i = 0; i < BLKG_RWSTAT_NR; i++)
-			sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
+			sum->cnt[i] += blkg_rwstat_read_counter(rwstat, i);
 	}
 	rcu_read_unlock();
 }
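The whole bug in a few lines of userspace C (illustration, not kernel code):
without the memset the accumulator may start dirty, and with '=' instead of
'+=' only the last descendant's counters survive.

#include <stdio.h>
#include <string.h>

int main(void)
{
	long desc[3] = { 10, 20, 30 };	/* per-descendant byte counts */
	long sum;
	int i;

	memset(&sum, 0, sizeof(sum));	/* mirrors the added memset() */
	for (i = 0; i < 3; i++)
		sum += desc[i];		/* '=' here would yield 30    */
	printf("%ld\n", sum);		/* 60 */
	return 0;
}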


@@ -375,6 +375,14 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
 	switch (bio_op(rq->bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
+		if (queue_max_discard_segments(rq->q) > 1) {
+			struct bio *bio = rq->bio;
+
+			for_each_bio(bio)
+				nr_phys_segs++;
+			return nr_phys_segs;
+		}
+		return 1;
 	case REQ_OP_WRITE_ZEROES:
 		return 0;
 	case REQ_OP_WRITE_SAME:


@@ -240,7 +240,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
 		 */
 		if (op == REQ_OP_ZONE_RESET &&
 		    blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
-			bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
+			bio->bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC;
 			break;
 		}


@@ -732,10 +732,8 @@ static void register_disk(struct device *parent, struct gendisk *disk,
 	disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
 	disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
 
-	if (disk->flags & GENHD_FL_HIDDEN) {
-		dev_set_uevent_suppress(ddev, 0);
+	if (disk->flags & GENHD_FL_HIDDEN)
 		return;
-	}
 
 	disk_scan_partitions(disk);


@@ -99,13 +99,12 @@ acpi_status acpi_ns_root_initialize(void)
 			 * just create and link the new node(s) here.
 			 */
 			new_node =
-			    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_namespace_node));
+			    acpi_ns_create_node(*ACPI_CAST_PTR(u32, init_val->name));
 			if (!new_node) {
 				status = AE_NO_MEMORY;
 				goto unlock_and_exit;
 			}
 
-			ACPI_COPY_NAMESEG(new_node->name.ascii, init_val->name);
-			new_node->descriptor_type = ACPI_DESC_TYPE_NAMED;
 			new_node->type = init_val->type;


@@ -9,6 +9,8 @@
 #ifndef _ACPI_INTERNAL_H_
 #define _ACPI_INTERNAL_H_
 
+#include <linux/idr.h>
+
 #define PREFIX "ACPI: "
 
 int early_acpi_osi_init(void);
@@ -96,9 +98,11 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
 
 extern struct list_head acpi_bus_id_list;
 
+#define ACPI_MAX_DEVICE_INSTANCES	4096
+
 struct acpi_device_bus_id {
 	const char *bus_id;
-	unsigned int instance_no;
+	struct ida instance_ida;
 	struct list_head node;
 };


@@ -482,9 +482,8 @@ static void acpi_device_del(struct acpi_device *device)
 	list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
 		if (!strcmp(acpi_device_bus_id->bus_id,
 			    acpi_device_hid(device))) {
-			if (acpi_device_bus_id->instance_no > 0)
-				acpi_device_bus_id->instance_no--;
-			else {
+			ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no);
+			if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
 				list_del(&acpi_device_bus_id->node);
 				kfree_const(acpi_device_bus_id->bus_id);
 				kfree(acpi_device_bus_id);
@@ -623,12 +622,38 @@ void acpi_bus_put_acpi_device(struct acpi_device *adev)
 	put_device(&adev->dev);
 }
 
+static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
+{
+	struct acpi_device_bus_id *acpi_device_bus_id;
+
+	/* Find suitable bus_id and instance number in acpi_bus_id_list. */
+	list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
+		if (!strcmp(acpi_device_bus_id->bus_id, dev_id))
+			return acpi_device_bus_id;
+	}
+
+	return NULL;
+}
+
+static int acpi_device_set_name(struct acpi_device *device,
+				struct acpi_device_bus_id *acpi_device_bus_id)
+{
+	struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
+	int result;
+
+	result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL);
+	if (result < 0)
+		return result;
+
+	device->pnp.instance_no = result;
+	dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result);
+	return 0;
+}
+
 int acpi_device_add(struct acpi_device *device,
 		    void (*release)(struct device *))
 {
+	struct acpi_device_bus_id *acpi_device_bus_id;
 	int result;
-	struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
-	int found = 0;
 
 	if (device->handle) {
 		acpi_status status;
@@ -654,41 +679,38 @@ int acpi_device_add(struct acpi_device *device,
 	INIT_LIST_HEAD(&device->del_list);
 	mutex_init(&device->physical_node_lock);
 
-	new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
-	if (!new_bus_id) {
-		pr_err(PREFIX "Memory allocation error\n");
-		result = -ENOMEM;
-		goto err_detach;
-	}
-
 	mutex_lock(&acpi_device_lock);
-	/*
-	 * Find suitable bus_id and instance number in acpi_bus_id_list
-	 * If failed, create one and link it into acpi_bus_id_list
-	 */
-	list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
-		if (!strcmp(acpi_device_bus_id->bus_id,
-			    acpi_device_hid(device))) {
-			acpi_device_bus_id->instance_no++;
-			found = 1;
-			kfree(new_bus_id);
-			break;
+
+	acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
+	if (acpi_device_bus_id) {
+		result = acpi_device_set_name(device, acpi_device_bus_id);
+		if (result)
+			goto err_unlock;
+	} else {
+		acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
+					     GFP_KERNEL);
+		if (!acpi_device_bus_id) {
+			result = -ENOMEM;
+			goto err_unlock;
 		}
-	}
 
-	if (!found) {
-		acpi_device_bus_id = new_bus_id;
 		acpi_device_bus_id->bus_id =
 			kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
 		if (!acpi_device_bus_id->bus_id) {
-			pr_err(PREFIX "Memory allocation error for bus id\n");
 			kfree(acpi_device_bus_id);
 			result = -ENOMEM;
-			goto err_free_new_bus_id;
+			goto err_unlock;
 		}
 
+		ida_init(&acpi_device_bus_id->instance_ida);
+
+		result = acpi_device_set_name(device, acpi_device_bus_id);
+		if (result) {
+			kfree(acpi_device_bus_id);
+			goto err_unlock;
+		}
+
-		acpi_device_bus_id->instance_no = 0;
 		list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
 	}
-	dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
 
 	if (device->parent)
 		list_add_tail(&device->node, &device->parent->children);
@@ -720,13 +742,9 @@ int acpi_device_add(struct acpi_device *device,
 	list_del(&device->node);
 	list_del(&device->wakeup_list);
 
-err_free_new_bus_id:
-	if (!found)
-		kfree(new_bus_id);
-
+err_unlock:
 	mutex_unlock(&acpi_device_lock);
 
 err_detach:
 	acpi_detach_data(device->handle, acpi_scan_drop_device);
 	return result;
 }
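A userspace analogy for the new naming scheme (the bitmap stand-in and the
64-ID cap are assumptions of this sketch, not the kernel's struct ida): the
lowest free instance number is handed out per bus_id and returned to the pool
on removal, so instance numbers no longer grow without bound across hotplug.

#include <stdio.h>

static unsigned long ida;	/* stand-in for struct ida */

static int ida_get_lowest(void)
{
	int id;

	for (id = 0; id < 64; id++)
		if (!(ida & (1UL << id))) {
			ida |= 1UL << id;
			return id;
		}
	return -1;	/* like exceeding ACPI_MAX_DEVICE_INSTANCES */
}

static void ida_put(int id)
{
	ida &= ~(1UL << id);
}

int main(void)
{
	int a = ida_get_lowest();	/* 0 */
	int b = ida_get_lowest();	/* 1 */

	ida_put(a);			/* device 0 removed ...      */
	printf("PNP0A03:%02x\n", ida_get_lowest());	/* 0 is reused */
	(void)b;
	return 0;
}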


@@ -147,6 +147,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		},
 	},
 	{
+		.callback = video_detect_force_vendor,
 		.ident = "Sony VPCEH3U1E",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),


@@ -2260,7 +2260,8 @@ out:
 	return rc;
 
 err_eni_release:
-	eni_do_release(dev);
+	dev->phy = NULL;
+	iounmap(ENI_DEV(dev)->ioaddr);
 err_unregister:
 	atm_dev_deregister(dev);
 err_free_consistent:


@@ -262,7 +262,7 @@ static int idt77105_start(struct atm_dev *dev)
 {
 	unsigned long flags;
 
-	if (!(dev->dev_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL)))
+	if (!(dev->phy_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL)))
 		return -ENOMEM;
 	PRIV(dev)->dev = dev;
 	spin_lock_irqsave(&idt77105_priv_lock, flags);
@@ -337,7 +337,7 @@ static int idt77105_stop(struct atm_dev *dev)
 			else
 				idt77105_all = walk->next;
 			dev->phy = NULL;
-			dev->dev_data = NULL;
+			dev->phy_data = NULL;
 			kfree(walk);
 			break;
 		}


@@ -2234,6 +2234,7 @@ static int lanai_dev_open(struct atm_dev *atmdev)
 	conf1_write(lanai);
 #endif
 	iounmap(lanai->base);
+	lanai->base = NULL;
 error_pci:
 	pci_disable_device(lanai->pci);
 error:
@@ -2246,6 +2247,8 @@ static int lanai_dev_open(struct atm_dev *atmdev)
 static void lanai_dev_close(struct atm_dev *atmdev)
 {
 	struct lanai_dev *lanai = (struct lanai_dev *) atmdev->dev_data;
+	if (lanai->base==NULL)
+		return;
 	printk(KERN_INFO DEV_LABEL "(itf %d): shutting down interface\n",
 		lanai->number);
 	lanai_timed_poll_stop(lanai);
@@ -2553,7 +2556,7 @@ static int lanai_init_one(struct pci_dev *pci,
 	struct atm_dev *atmdev;
 	int result;
 
-	lanai = kmalloc(sizeof(*lanai), GFP_KERNEL);
+	lanai = kzalloc(sizeof(*lanai), GFP_KERNEL);
 	if (lanai == NULL) {
 		printk(KERN_ERR DEV_LABEL
 		       ": couldn't allocate dev_data structure!\n");


@@ -211,7 +211,7 @@ static void uPD98402_int(struct atm_dev *dev)
 static int uPD98402_start(struct atm_dev *dev)
 {
 	DPRINTK("phy_start\n");
-	if (!(dev->dev_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
+	if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
 		return -ENOMEM;
 	spin_lock_init(&PRIV(dev)->lock);
 	memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats));


@@ -305,7 +305,7 @@ static int rpm_get_suppliers(struct device *dev)
 	return 0;
 }
 
-static void rpm_put_suppliers(struct device *dev)
+static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
 {
 	struct device_link *link;
 
@@ -313,10 +313,30 @@ static void rpm_put_suppliers(struct device *dev)
 			device_links_read_lock_held()) {
 
 		while (refcount_dec_not_one(&link->rpm_active))
-			pm_runtime_put(link->supplier);
+			pm_runtime_put_noidle(link->supplier);
+
+		if (try_to_suspend)
+			pm_request_idle(link->supplier);
 	}
 }
 
+static void rpm_put_suppliers(struct device *dev)
+{
+	__rpm_put_suppliers(dev, true);
+}
+
+static void rpm_suspend_suppliers(struct device *dev)
+{
+	struct device_link *link;
+	int idx = device_links_read_lock();
+
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+				device_links_read_lock_held())
+		pm_request_idle(link->supplier);
+
+	device_links_read_unlock(idx);
+}
+
 /**
  * __rpm_callback - Run a given runtime PM callback for a given device.
  * @cb: Runtime PM callback to run.
@@ -344,8 +364,10 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 		idx = device_links_read_lock();
 
 		retval = rpm_get_suppliers(dev);
-		if (retval)
+		if (retval) {
+			rpm_put_suppliers(dev);
 			goto fail;
+		}
 
 		device_links_read_unlock(idx);
 	}
@@ -368,9 +390,9 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 	    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
 		idx = device_links_read_lock();
 
-fail:
-		rpm_put_suppliers(dev);
+		__rpm_put_suppliers(dev, false);
 
+fail:
 		device_links_read_unlock(idx);
 	}
 
@@ -642,8 +664,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 		goto out;
 	}
 
+	if (dev->power.irq_safe)
+		goto out;
+
 	/* Maybe the parent is now able to suspend. */
-	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
+	if (parent && !parent->power.ignore_children) {
 		spin_unlock(&dev->power.lock);
 
 		spin_lock(&parent->power.lock);
@@ -652,6 +677,14 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 		spin_lock(&dev->power.lock);
 	}
 
+	/* Maybe the suppliers are now able to suspend. */
+	if (dev->power.links_count > 0) {
+		spin_unlock_irq(&dev->power.lock);
+
+		rpm_suspend_suppliers(dev);
+
+		spin_lock_irq(&dev->power.lock);
+	}
+
  out:
 	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);


@@ -877,6 +877,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	if (card->mm_pages[0].desc == NULL ||
 	    card->mm_pages[1].desc == NULL) {
 		dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n");
+		ret = -ENOMEM;
 		goto failed_alloc;
 	}
 	reset_page(&card->mm_pages[0]);
@@ -888,8 +889,10 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	spin_lock_init(&card->lock);
 
 	card->queue = blk_alloc_queue(NUMA_NO_NODE);
-	if (!card->queue)
+	if (!card->queue) {
+		ret = -ENOMEM;
 		goto failed_alloc;
+	}
 
 	tasklet_init(&card->tasklet, process_page, (unsigned long)card);


@@ -891,7 +891,7 @@ next:
 out:
 	for (i = last_map; i < num; i++) {
 		/* Don't zap current batch's valid persistent grants. */
-		if(i >= last_map + segs_to_map)
+		if(i >= map_until)
 			pages[i]->persistent_gnt = NULL;
 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
 	}


@@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev)
 	 */
 	l3->debug_irq = platform_get_irq(pdev, 0);
 	ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
-			       0x0, "l3-dbg-irq", l3);
+			       IRQF_NO_THREAD, "l3-dbg-irq", l3);
 	if (ret) {
 		dev_err(l3->dev, "request_irq failed for %d\n",
 			l3->debug_irq);
@@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev)
 
 	l3->app_irq = platform_get_irq(pdev, 1);
 	ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
-			       0x0, "l3-app-irq", l3);
+			       IRQF_NO_THREAD, "l3-app-irq", l3);
 	if (ret)
 		dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);


@@ -620,7 +620,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
.name = "gcc_sdcc1_apps_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = 5,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_floor_ops,
},
};
@@ -642,7 +642,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
.name = "gcc_sdcc1_ice_core_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = 4,
.ops = &clk_rcg2_floor_ops,
.ops = &clk_rcg2_ops,
},
};


@@ -103,6 +103,8 @@ static const struct of_device_id whitelist[] __initconst = {
 static const struct of_device_id blacklist[] __initconst = {
 	{ .compatible = "allwinner,sun50i-h6", },
 
+	{ .compatible = "arm,vexpress", },
+
 	{ .compatible = "calxeda,highbank", },
 	{ .compatible = "calxeda,ecx-2000", },


@@ -174,7 +174,7 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
 	int ret, value;
 
 	ret = request_threaded_irq(event->irq, NULL, event->handler,
-				   event->irqflags, "ACPI:Event", event);
+				   event->irqflags | IRQF_ONESHOT, "ACPI:Event", event);
 	if (ret) {
 		dev_err(acpi_gpio->chip->parent,
 			"Failed to setup interrupt handler for %d\n",


@@ -239,6 +239,7 @@ source "drivers/gpu/drm/arm/Kconfig"
 config DRM_RADEON
 	tristate "ATI Radeon"
 	depends on DRM && PCI && MMU
+	depends on AGP || !AGP
 	select FW_LOADER
 	select DRM_KMS_HELPER
 	select DRM_TTM


@@ -1093,6 +1093,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+	{0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 
 	{0, 0, 0}


@@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 	size = mode_cmd->pitches[0] * height;
 	aligned_size = ALIGN(size, PAGE_SIZE);
 	ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags,
-				       ttm_bo_type_kernel, NULL, &gobj);
+				       ttm_bo_type_device, NULL, &gobj);
 	if (ret) {
 		pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
 		return -ENOMEM;


@@ -341,8 +341,7 @@ void enc2_hw_init(struct link_encoder *enc)
 	} else {
 		AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
 
-		AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);
+		AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
 	}
 
 	//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;


@@ -295,7 +295,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
 	.num_banks = 8,
 	.num_chans = 4,
 	.vmm_page_size_bytes = 4096,
-	.dram_clock_change_latency_us = 11.72,
+	.dram_clock_change_latency_us = 23.84,
 	.return_bus_width_bytes = 64,
 	.dispclk_dppclk_vco_speed_mhz = 3600,
 	.xfc_bus_transport_time_us = 4,


@@ -524,6 +524,48 @@ static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
 					tmp, MC_CG_ARB_FREQ_F0);
 }
 
+static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+	uint16_t pcie_gen = 0;
+
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 &&
+	    adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4)
+		pcie_gen = 3;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 &&
+		adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
+		pcie_gen = 2;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 &&
+		adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2)
+		pcie_gen = 1;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 &&
+		adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1)
+		pcie_gen = 0;
+
+	return pcie_gen;
+}
+
+static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+	uint16_t pcie_width = 0;
+
+	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+		pcie_width = 16;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+		pcie_width = 12;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+		pcie_width = 8;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+		pcie_width = 4;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+		pcie_width = 2;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+		pcie_width = 1;
+
+	return pcie_width;
+}
+
 static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -620,6 +662,11 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
 					PP_Min_PCIEGen),
 			get_pcie_lane_support(data->pcie_lane_cap,
 					PP_Max_PCIELane));
+
+		if (data->pcie_dpm_key_disabled)
+			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+				data->dpm_table.pcie_speed_table.count,
+				smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr));
 	}
 	return 0;
 }
@@ -1180,6 +1227,13 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
 				NULL)),
 				"Failed to enable pcie DPM during DPM Start Function!",
 				return -EINVAL);
+	} else {
+		PP_ASSERT_WITH_CODE(
+				(0 == smum_send_msg_to_smc(hwmgr,
+						PPSMC_MSG_PCIeDPM_Disable,
+						NULL)),
+				"Failed to disble pcie DPM during DPM Start Function!",
+				return -EINVAL);
 	}
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,


@@ -54,6 +54,9 @@
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#define smnPCIE_LC_SPEED_CNTL 0x11140290
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
#define HBM_MEMORY_CHANNEL_WIDTH 128
static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
@@ -443,8 +446,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (PP_CAP(PHM_PlatformCaps_VCEDPM))
data->smu_features[GNLD_DPM_VCE].supported = true;
if (!data->registry_data.pcie_dpm_key_disabled)
data->smu_features[GNLD_DPM_LINK].supported = true;
data->smu_features[GNLD_DPM_LINK].supported = true;
if (!data->registry_data.dcefclk_dpm_key_disabled)
data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
@@ -1545,6 +1547,13 @@ static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr)
pp_table->PcieLaneCount[i] = pcie_width;
}
if (data->registry_data.pcie_dpm_key_disabled) {
for (i = 0; i < NUM_LINK_LEVELS; i++) {
pp_table->PcieGenSpeed[i] = pcie_gen;
pp_table->PcieLaneCount[i] = pcie_width;
}
}
return 0;
}
@@ -2967,6 +2976,14 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
}
if (data->registry_data.pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap),
"Attempt to Disable Link DPM feature Failed!", return -EINVAL);
data->smu_features[GNLD_DPM_LINK].enabled = false;
data->smu_features[GNLD_DPM_LINK].supported = false;
}
return 0;
}
@@ -4583,6 +4600,24 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return 0;
}
static int vega10_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
}
static int vega10_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
}
static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
@@ -4591,8 +4626,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
PPTable_t *pptable = &(data->smc_state_table.pp_table);
int i, now, size = 0, count = 0;
@@ -4649,15 +4685,31 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
"*" : "");
break;
case PP_PCIE:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now);
current_gen_speed =
vega10_get_current_pcie_link_speed_level(hwmgr);
current_lane_width =
vega10_get_current_pcie_link_width_level(hwmgr);
for (i = 0; i < NUM_LINK_LEVELS; i++) {
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];
for (i = 0; i < pcie_table->count; i++)
size += sprintf(buf + size, "%d: %s %s\n", i,
(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
(i == now) ? "*" : "");
size += sprintf(buf + size, "%d: %s %s %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
(gen_speed == 3) ? "16.0GT/s," : "",
(lane_width == 1) ? "x1" :
(lane_width == 2) ? "x2" :
(lane_width == 3) ? "x4" :
(lane_width == 4) ? "x8" :
(lane_width == 5) ? "x12" :
(lane_width == 6) ? "x16" : "",
(current_gen_speed == gen_speed) &&
(current_lane_width == lane_width) ?
"*" : "");
}
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
size = sprintf(buf, "%s:\n", "OD_SCLK");


@@ -133,6 +133,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
 	data->registry_data.auto_wattman_debug = 0;
 	data->registry_data.auto_wattman_sample_period = 100;
 	data->registry_data.auto_wattman_threshold = 50;
+	data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
 }
 
 static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
@@ -539,6 +540,29 @@ static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
 		pp_table->PcieLaneCount[i] = pcie_width_arg;
 	}
 
+	/* override to the highest if it's disabled from ppfeaturmask */
+	if (data->registry_data.pcie_dpm_key_disabled) {
+		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+			smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
+			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+				NULL);
+			PP_ASSERT_WITH_CODE(!ret,
+				"[OverridePcieParameters] Attempt to override pcie params failed!",
+				return ret);
+
+			pp_table->PcieGenSpeed[i] = pcie_gen;
+			pp_table->PcieLaneCount[i] = pcie_width;
+		}
+		ret = vega12_enable_smc_features(hwmgr,
+				false,
+				data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
+		PP_ASSERT_WITH_CODE(!ret,
+				"Attempt to Disable DPM LINK Failed!",
+				return ret);
+		data->smu_features[GNLD_DPM_LINK].enabled = false;
+		data->smu_features[GNLD_DPM_LINK].supported = false;
+	}
+
 	return 0;
 }


@@ -171,6 +171,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
 	data->registry_data.gfxoff_controlled_by_driver = 1;
 	data->gfxoff_allowed = false;
 	data->counter_gfxoff = 0;
+	data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
 }
 
 static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
@@ -885,6 +886,30 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
 		pp_table->PcieLaneCount[i] = pcie_width_arg;
 	}
 
+	/* override to the highest if it's disabled from ppfeaturmask */
+	if (data->registry_data.pcie_dpm_key_disabled) {
+		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+			smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
+			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+				NULL);
+			PP_ASSERT_WITH_CODE(!ret,
+				"[OverridePcieParameters] Attempt to override pcie params failed!",
+				return ret);
+
+			pp_table->PcieGenSpeed[i] = pcie_gen;
+			pp_table->PcieLaneCount[i] = pcie_width;
+		}
+		ret = vega20_enable_smc_features(hwmgr,
+				false,
+				data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
+		PP_ASSERT_WITH_CODE(!ret,
+				"Attempt to Disable DPM LINK Failed!",
+				return ret);
+		data->smu_features[GNLD_DPM_LINK].enabled = false;
+		data->smu_features[GNLD_DPM_LINK].supported = false;
+	}
+
 	return 0;
 }


@@ -675,7 +675,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 		struct page **pages = pvec + pinned;
 
 		ret = pin_user_pages_fast(ptr, num_pages,
-					  !userptr->ro ? FOLL_WRITE : 0, pages);
+					  FOLL_WRITE | FOLL_FORCE, pages);
 		if (ret < 0) {
 			unpin_user_pages(pvec, pinned);
 			kvfree(pvec);


@@ -316,7 +316,18 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
 	WRITE_ONCE(fence->vma, NULL);
 	vma->fence = NULL;
 
-	with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
+	/*
+	 * Skip the write to HW if and only if the device is currently
+	 * suspended.
+	 *
+	 * If the driver does not currently hold a wakeref (if_in_use == 0),
+	 * the device may currently be runtime suspended, or it may be woken
+	 * up before the suspend takes place. If the device is not suspended
+	 * (powered down) and we skip clearing the fence register, the HW is
+	 * left in an undefined state where we may end up with multiple
+	 * registers overlapping.
+	 */
+	with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref)
 		fence_write(fence);
 }


@@ -412,12 +412,20 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
}
/**
* intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
* __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
* @rpm: the intel_runtime_pm structure
* @ignore_usecount: get a ref even if dev->power.usage_count is 0
*
* This function grabs a device-level runtime pm reference if the device is
* already in use and ensures that it is powered up. It is illegal to try
* and access the HW should intel_runtime_pm_get_if_in_use() report failure.
* already active and ensures that it is powered up. It is illegal to try
* and access the HW should intel_runtime_pm_get_if_active() report failure.
*
* If @ignore_usecount=true, a reference will be acquired even if there is no
* user requiring the device to be powered up (dev->power.usage_count == 0).
* If the function returns false in this case then it's guaranteed that the
* device's runtime suspend hook has been called already or that it will be
* called (and hence it's also guaranteed that the device's runtime resume
* hook will be called eventually).
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
@@ -425,7 +433,8 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
* Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
* as True if the wakeref was acquired, or False otherwise.
*/
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
bool ignore_usecount)
{
if (IS_ENABLED(CONFIG_PM)) {
/*
@@ -434,7 +443,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
* function, since the power state is undefined. This applies
* atm to the late/early system suspend/resume handlers.
*/
if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
return 0;
}
@@ -443,6 +452,16 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
return track_intel_runtime_pm_wakeref(rpm);
}
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
return __intel_runtime_pm_get_if_active(rpm, false);
}
intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
{
return __intel_runtime_pm_get_if_active(rpm, true);
}
/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
* @rpm: the intel_runtime_pm structure


@@ -177,6 +177,7 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
@@ -188,6 +189,10 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
#define with_intel_runtime_pm_if_active(rpm, wf) \
for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
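
The with_intel_runtime_pm_if_active() helper added above follows the i915 for-loop idiom: run the body once if a wakeref was obtained, and drop it afterwards. A toy C sketch of that shape (pm_get_if_active()/pm_put() are stand-ins, not the driver's API):

#include <stdio.h>

static int pm_get_if_active(void) { return 1; }	/* pretend the device is active */
static void pm_put(int wf) { (void)wf; }

/* Runs the statement that follows at most once, only while holding wf */
#define with_pm_if_active(wf) \
	for ((wf) = pm_get_if_active(); (wf); pm_put(wf), (wf) = 0)

int main(void)
{
	int wakeref;

	with_pm_if_active(wakeref)
		puts("write fence register");	/* skipped when no wakeref */
	return 0;
}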


@@ -163,7 +163,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
break;
case MSM_DSI_PHY_7NM:
case MSM_DSI_PHY_7NM_V4_1:
pll = msm_dsi_pll_7nm_init(pdev, id);
pll = msm_dsi_pll_7nm_init(pdev, type, id);
break;
default:
pll = ERR_PTR(-ENXIO);


@@ -117,10 +117,12 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
}
#endif
#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id);
struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id);
#else
static inline struct msm_dsi_pll *
msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
msm_dsi_pll_7nm_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id)
{
return ERR_PTR(-ENODEV);
}


@@ -852,7 +852,8 @@ err_base_clk_hw:
return ret;
}
struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id)
{
struct dsi_pll_7nm *pll_7nm;
struct msm_dsi_pll *pll;
@@ -885,7 +886,7 @@ struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
pll = &pll_7nm->base;
pll->min_rate = 1000000000UL;
pll->max_rate = 3500000000UL;
if (pll->type == MSM_DSI_PHY_7NM_V4_1) {
if (type == MSM_DSI_PHY_7NM_V4_1) {
pll->min_rate = 600000000UL;
pll->max_rate = (unsigned long)5000000000ULL;
/* workaround for max rate overflowing on 32-bit builds: */


@@ -1079,6 +1079,10 @@ static int __maybe_unused msm_pm_resume(struct device *dev)
static int __maybe_unused msm_pm_prepare(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
if (!priv || !priv->kms)
return 0;
return drm_mode_config_helper_suspend(ddev);
}
@@ -1086,6 +1090,10 @@ static int __maybe_unused msm_pm_prepare(struct device *dev)
static void __maybe_unused msm_pm_complete(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
if (!priv || !priv->kms)
return;
drm_mode_config_helper_resume(ddev);
}
@@ -1318,6 +1326,10 @@ static int msm_pdev_remove(struct platform_device *pdev)
static void msm_pdev_shutdown(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
struct msm_drm_private *priv = drm ? drm->dev_private : NULL;
if (!priv || !priv->kms)
return;
drm_atomic_helper_shutdown(drm);
}


@@ -3610,13 +3610,13 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
ep->com.local_addr.ss_family == AF_INET) {
err = cxgb4_remove_server_filter(
ep->com.dev->rdev.lldi.ports[0], ep->stid,
ep->com.dev->rdev.lldi.rxq_ids[0], 0);
ep->com.dev->rdev.lldi.rxq_ids[0], false);
} else {
struct sockaddr_in6 *sin6;
c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_remove_server(
ep->com.dev->rdev.lldi.ports[0], ep->stid,
ep->com.dev->rdev.lldi.rxq_ids[0], 0);
ep->com.dev->rdev.lldi.rxq_ids[0], true);
if (err)
goto done;
err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,


@@ -179,5 +179,6 @@ err_free_tcu:
}
IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(jz4760_tcu_irq, "ingenic,jz4760-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init);
IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init);


@@ -155,6 +155,7 @@ static int __init intc_2chip_of_init(struct device_node *node,
{
return ingenic_intc_of_init(node, 2);
}
IRQCHIP_DECLARE(jz4760_intc, "ingenic,jz4760-intc", intc_2chip_of_init);
IRQCHIP_DECLARE(jz4770_intc, "ingenic,jz4770-intc", intc_2chip_of_init);
IRQCHIP_DECLARE(jz4775_intc, "ingenic,jz4775-intc", intc_2chip_of_init);
IRQCHIP_DECLARE(jz4780_intc, "ingenic,jz4780-intc", intc_2chip_of_init);


@@ -529,7 +529,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
* Grab our output buffer.
*/
nl = orig_nl = get_result_buffer(param, param_size, &len);
if (len < needed) {
if (len < needed || len < sizeof(nl->dev)) {
param->flags |= DM_BUFFER_FULL_FLAG;
goto out;
}
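
The added len < sizeof(nl->dev) test refuses to emit even an empty terminating entry when the result buffer cannot hold the fixed leading field. A userspace sketch of the same bounds check (the struct layout here is illustrative, not dm's real dm_name_list):

#include <stdio.h>
#include <string.h>

struct name_list {
	unsigned long long dev;	/* fixed leading field */
	unsigned int next;
	char name[];
};

/* Mirror the check above: report "buffer full" rather than overflowing. */
static int emit_empty_entry(char *buf, size_t len)
{
	struct name_list *nl = (struct name_list *)buf;

	if (len < sizeof(nl->dev))
		return -1;	/* caller would set DM_BUFFER_FULL_FLAG */
	memset(buf, 0, sizeof(nl->dev));
	return 0;
}

int main(void)
{
	char tiny[4], ok[64];

	printf("%d %d\n", emit_empty_entry(tiny, sizeof(tiny)),
	       emit_empty_entry(ok, sizeof(ok)));
	return 0;
}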


@@ -1678,6 +1678,13 @@ static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
return !q || blk_queue_zoned_model(q) != *zoned_model;
}
/*
* Check the device zoned model based on the target feature flag. If the target
* has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
* also accepted but all devices must have the same zoned model. If the target
* has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
* zoned model with all zoned devices having the same zone size.
*/
static bool dm_table_supports_zoned_model(struct dm_table *t,
enum blk_zoned_model zoned_model)
{
@@ -1687,13 +1694,15 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (zoned_model == BLK_ZONED_HM &&
!dm_target_supports_zoned_hm(ti->type))
return false;
if (!ti->type->iterate_devices ||
ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
return false;
if (dm_target_supports_zoned_hm(ti->type)) {
if (!ti->type->iterate_devices ||
ti->type->iterate_devices(ti, device_not_zoned_model,
&zoned_model))
return false;
} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
if (zoned_model == BLK_ZONED_HM)
return false;
}
}
return true;
@@ -1705,9 +1714,17 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *
struct request_queue *q = bdev_get_queue(dev->bdev);
unsigned int *zone_sectors = data;
if (!blk_queue_is_zoned(q))
return 0;
return !q || blk_queue_zone_sectors(q) != *zone_sectors;
}
/*
* Check consistency of zoned model and zone sectors across all targets. For
* zone sectors, if the destination device is a zoned block device, it shall
* have the specified zone_sectors.
*/
static int validate_hardware_zoned_model(struct dm_table *table,
enum blk_zoned_model zoned_model,
unsigned int zone_sectors)
@@ -1726,7 +1743,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
return -EINVAL;
if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
DMERR("%s: zone sectors is not consistent across all devices",
DMERR("%s: zone sectors is not consistent across all zoned devices",
dm_device_name(table->md));
return -EINVAL;
}
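
The dm_table_supports_zoned_model() rework above boils down to three cases per target: host-managed-aware targets require all devices to match the table's zoned model, mixed-model targets accept anything, and the rest reject a host-managed table. A reduced sketch of that decision (one target, plain booleans; not dm's API):

#include <stdbool.h>
#include <stdio.h>

enum zoned_model { BLK_ZONED_NONE, BLK_ZONED_HA, BLK_ZONED_HM };

static bool target_ok(enum zoned_model table_model, bool supports_hm,
		      bool supports_mixed, bool devices_match_model)
{
	if (supports_hm)
		return devices_match_model;	/* all devices must match */
	if (!supports_mixed)
		return table_model != BLK_ZONED_HM;	/* plain targets reject HM */
	return true;	/* mixed-model targets accept any zoned model */
}

int main(void)
{
	printf("%d %d\n", target_ok(BLK_ZONED_HM, false, true, false),
	       target_ok(BLK_ZONED_HM, false, false, true));
	return 0;
}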


@@ -34,7 +34,7 @@
#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC + \
#define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC + \
DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;


@@ -1143,7 +1143,7 @@ static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
static struct target_type dmz_type = {
.name = "zoned",
.version = {2, 0, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
.features = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL,
.module = THIS_MODULE,
.ctr = dmz_ctr,
.dtr = dmz_dtr,


@@ -106,6 +106,8 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
list_del(&hpriv->dev_node);
mutex_unlock(&hdev->fpriv_list_lock);
put_pid(hpriv->taskpid);
kfree(hpriv);
return 0;


@@ -3918,15 +3918,11 @@ static int bond_neigh_init(struct neighbour *n)
rcu_read_lock();
slave = bond_first_slave_rcu(bond);
if (!slave) {
ret = -EINVAL;
if (!slave)
goto out;
}
slave_ops = slave->dev->netdev_ops;
if (!slave_ops->ndo_neigh_setup) {
ret = -EINVAL;
if (!slave_ops->ndo_neigh_setup)
goto out;
}
/* TODO: find another way [1] to implement this.
* Passing a zeroed structure is fragile,


@@ -212,18 +212,6 @@ static const struct can_bittiming_const c_can_bittiming_const = {
.brp_inc = 1,
};
static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
{
if (priv->device)
pm_runtime_enable(priv->device);
}
static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
{
if (priv->device)
pm_runtime_disable(priv->device);
}
static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
{
if (priv->device)
@@ -1335,7 +1323,6 @@ static const struct net_device_ops c_can_netdev_ops = {
int register_c_can_dev(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
int err;
/* Deactivate pins to prevent DRA7 DCAN IP from being
@@ -1345,28 +1332,19 @@ int register_c_can_dev(struct net_device *dev)
*/
pinctrl_pm_select_sleep_state(dev->dev.parent);
c_can_pm_runtime_enable(priv);
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &c_can_netdev_ops;
err = register_candev(dev);
if (err)
c_can_pm_runtime_disable(priv);
else
if (!err)
devm_can_led_init(dev);
return err;
}
EXPORT_SYMBOL_GPL(register_c_can_dev);
void unregister_c_can_dev(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
unregister_candev(dev);
c_can_pm_runtime_disable(priv);
}
EXPORT_SYMBOL_GPL(unregister_c_can_dev);


@@ -239,12 +239,13 @@ static void c_can_pci_remove(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct c_can_priv *priv = netdev_priv(dev);
void __iomem *addr = priv->base;
unregister_c_can_dev(dev);
free_c_can_dev(dev);
pci_iounmap(pdev, priv->base);
pci_iounmap(pdev, addr);
pci_disable_msi(pdev);
pci_clear_master(pdev);
pci_release_regions(pdev);


@@ -29,6 +29,7 @@
#include <linux/list.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -386,6 +387,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
pm_runtime_enable(priv->device);
ret = register_c_can_dev(dev);
if (ret) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
@@ -398,6 +400,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
return 0;
exit_free_device:
pm_runtime_disable(priv->device);
free_c_can_dev(dev);
exit:
dev_err(&pdev->dev, "probe failed\n");
@@ -408,9 +411,10 @@ exit:
static int c_can_plat_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct c_can_priv *priv = netdev_priv(dev);
unregister_c_can_dev(dev);
pm_runtime_disable(priv->device);
free_c_can_dev(dev);
return 0;


@@ -1255,6 +1255,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head)
static struct rtnl_link_ops can_link_ops __read_mostly = {
.kind = "can",
.netns_refund = true,
.maxtype = IFLA_CAN_MAX,
.policy = can_policy,
.setup = can_setup,


@@ -658,9 +658,15 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
static int flexcan_chip_freeze(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
unsigned int timeout;
u32 bitrate = priv->can.bittiming.bitrate;
u32 reg;
if (bitrate)
timeout = 1000 * 1000 * 10 / bitrate;
else
timeout = FLEXCAN_TIMEOUT_US / 10;
reg = priv->read(&regs->mcr);
reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT;
priv->write(reg, &regs->mcr);
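
The flexcan hunk above avoids a divide-by-zero when the freeze is requested before a bitrate has been configured. The timeout selection as a standalone sketch (the fallback constant is illustrative, not the driver's FLEXCAN_TIMEOUT_US value):

#include <stdio.h>

#define TIMEOUT_US 250	/* illustrative fallback */

/* Derive the freeze timeout from the bitrate when one is set; otherwise
 * fall back to a fixed default instead of dividing by zero. */
static unsigned int freeze_timeout(unsigned int bitrate)
{
	return bitrate ? 1000 * 1000 * 10 / bitrate : TIMEOUT_US / 10;
}

int main(void)
{
	printf("%u %u\n", freeze_timeout(500000), freeze_timeout(0));
	return 0;
}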


@@ -57,6 +57,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
/* Loopback control register */
@@ -949,6 +950,9 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
0);
/* Disable Bus load reporting */
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
tx_npackets = ioread32(can->reg_base +
KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &


@@ -502,9 +502,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
}
while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
if (rxfs & RXFS_RFL)
netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
m_can_read_fifo(dev, rxfs);
quota--;
@@ -885,7 +882,7 @@ static int m_can_rx_peripheral(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
m_can_rx_handler(dev, 1);
m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT);
m_can_enable_all_interrupts(cdev);


@@ -1070,13 +1070,6 @@ static int b53_setup(struct dsa_switch *ds)
b53_disable_port(ds, port);
}
/* Let DSA handle the case where multiple bridges span the same switch
* device and different VLAN awareness settings are requested, which
* would be breaking filtering semantics for any of the other bridge
* devices. (not hardware supported)
*/
ds->vlan_filtering_is_global = true;
return b53_setup_devlink_resources(ds);
}
@@ -2627,6 +2620,13 @@ struct b53_device *b53_switch_alloc(struct device *base,
ds->configure_vlan_while_not_filtering = true;
ds->untag_bridge_pvid = true;
dev->vlan_enabled = ds->configure_vlan_while_not_filtering;
/* Let DSA handle the case where multiple bridges span the same switch
* device and different VLAN awareness settings are requested, which
* would be breaking filtering semantics for any of the other bridge
* devices. (not hardware supported)
*/
ds->vlan_filtering_is_global = true;
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);


@@ -584,8 +584,10 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
* in bits 15:8 and the patch level in bits 7:0 which is exactly what
* the REG_PHY_REVISION register layout is.
*/
return priv->hw_params.gphy_rev;
if (priv->int_phy_mask & BIT(port))
return priv->hw_params.gphy_rev;
else
return 0;
}
static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,


@@ -727,7 +727,7 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
kvfree(tx_info);
return 0;
}
tx_info->open_state = false;
tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
spin_unlock(&tx_info->lock);
complete(&tx_info->completion);


@@ -1510,7 +1510,7 @@ dm9000_probe(struct platform_device *pdev)
goto out;
}
db->irq_wake = platform_get_irq(pdev, 1);
db->irq_wake = platform_get_irq_optional(pdev, 1);
if (db->irq_wake >= 0) {
dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);


@@ -1308,6 +1308,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
*/
if (unlikely(priv->need_mac_restart)) {
ftgmac100_start_hw(priv);
priv->need_mac_restart = false;
/* Re-enable "bad" interrupts */
iowrite32(FTGMAC100_INT_BAD,


@@ -234,6 +234,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_MAXFRM 0x8014
#define ENETC_SET_TX_MTU(val) ((val) << 16)
#define ENETC_SET_MAXFRM(val) ((val) & 0xffff)
#define ENETC_PM0_RX_FIFO 0x801c
#define ENETC_PM0_RX_FIFO_VAL 1
#define ENETC_PM_IMDIO_BASE 0x8030


@@ -490,6 +490,12 @@ static void enetc_configure_port_mac(struct enetc_hw *hw)
enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
/* On LS1028A, the MAC RX FIFO defaults to 2, which is too high
* and may lead to RX lock-up under traffic. Set it to 1 instead,
* as recommended by the hardware team.
*/
enetc_port_wr(hw, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
}
static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)


@@ -377,9 +377,16 @@ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
u64 ns;
unsigned long flags;
mutex_lock(&adapter->ptp_clk_mutex);
/* Check the ptp clock */
if (!adapter->ptp_clk_on) {
mutex_unlock(&adapter->ptp_clk_mutex);
return -EINVAL;
}
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_read(&adapter->tc);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
mutex_unlock(&adapter->ptp_clk_mutex);
*ts = ns_to_timespec64(ns);


@@ -2391,6 +2391,10 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
if (lstatus & BD_LFLAG(RXBD_LAST))
size -= skb->len;
WARN(size < 0, "gianfar: rx fragment size underflow");
if (size < 0)
return false;
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
rxb->page_offset + RXBUF_ALIGNMENT,
size, GFAR_RXB_TRUESIZE);
@@ -2553,6 +2557,17 @@ static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
if (lstatus & BD_LFLAG(RXBD_EMPTY))
break;
/* lost RXBD_LAST descriptor due to overrun */
if (skb &&
(lstatus & BD_LFLAG(RXBD_FIRST))) {
/* discard faulty buffer */
dev_kfree_skb(skb);
skb = NULL;
rx_queue->stats.rx_dropped++;
/* can continue normally */
}
/* order rx buffer descriptor reads */
rmb();


@@ -1663,8 +1663,10 @@ static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
for (j = 0; j < fetch_num; j++) {
/* alloc one skb and init */
skb = hns_assemble_skb(ndev);
if (!skb)
if (!skb) {
ret = -ENOMEM;
goto out;
}
rd = &tx_ring_data(priv, skb->queue_mapping);
hns_nic_net_xmit_hw(ndev, skb, rd);


@@ -899,6 +899,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
} else {
data &= ~IGP02E1000_PM_D0_LPLU;
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
if (ret_val)
return ret_val;
/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable


@@ -5974,15 +5974,19 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
rtnl_lock();
/* don't run the task if already down */
if (test_bit(__E1000_DOWN, &adapter->state))
if (test_bit(__E1000_DOWN, &adapter->state)) {
rtnl_unlock();
return;
}
if (!(adapter->flags & FLAG_RESTART_NOW)) {
e1000e_dump(adapter);
e_err("Reset adapter unexpectedly\n");
}
e1000e_reinit_locked(adapter);
rtnl_unlock();
}
/**
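
The e1000e fix above adds the rtnl_unlock() that was missing on the early-return path, so the work item no longer exits with the lock held. The generic shape of the pattern, with a pthread mutex standing in for rtnl_lock()/rtnl_unlock():

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool device_down = true;

static void reset_task(void)
{
	pthread_mutex_lock(&lock);
	if (device_down) {
		pthread_mutex_unlock(&lock);	/* the unlock the fix adds */
		return;
	}
	/* ... reset work done under the lock ... */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	reset_task();
	reset_task();	/* would deadlock here without the early unlock */
	puts("done");
	return 0;
}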


@@ -1776,7 +1776,8 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
goto err_alloc;
}
if (iavf_process_config(adapter))
err = iavf_process_config(adapter);
if (err)
goto err_alloc;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;


@@ -748,8 +748,8 @@ void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
struct sk_buff *skb);
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
struct sk_buff *skb);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);


@@ -8232,7 +8232,8 @@ static inline bool igb_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
int rx_buf_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@@ -8243,7 +8244,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define IGB_LAST_OFFSET \
@@ -8319,9 +8320,10 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
return NULL;
if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb);
xdp->data += IGB_TS_HDR_LEN;
size -= IGB_TS_HDR_LEN;
if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
xdp->data += IGB_TS_HDR_LEN;
size -= IGB_TS_HDR_LEN;
}
}
/* Determine available headroom for copy */
@@ -8382,8 +8384,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
/* pull timestamp out of packet data */
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
__skb_pull(skb, IGB_TS_HDR_LEN);
if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
__skb_pull(skb, IGB_TS_HDR_LEN);
}
/* update buffer offset */
@@ -8632,11 +8634,17 @@ static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
}
static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
const unsigned int size)
const unsigned int size, int *rx_buf_pgcnt)
{
struct igb_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
*rx_buf_pgcnt =
#if (PAGE_SIZE < 8192)
page_count(rx_buffer->page);
#else
0;
#endif
prefetchw(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
@@ -8652,9 +8660,9 @@ static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
}
static void igb_put_rx_buffer(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer)
struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
{
if (igb_can_reuse_rx_page(rx_buffer)) {
if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
/* hand second half of page back to the ring */
igb_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -8681,6 +8689,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
u16 cleaned_count = igb_desc_unused(rx_ring);
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
int rx_buf_pgcnt;
xdp.rxq = &rx_ring->xdp_rxq;
@@ -8711,7 +8720,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
*/
dma_rmb();
rx_buffer = igb_get_rx_buffer(rx_ring, size);
rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
@@ -8754,7 +8763,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
break;
}
igb_put_rx_buffer(rx_ring, rx_buffer);
igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
cleaned_count++;
/* fetch next buffer in frame if non-eop */
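
The rx_buf_pgcnt plumbing above samples the page refcount once, when the buffer is fetched, so the reuse decision no longer depends on re-reading page_ref_count() at put time. The reuse test itself reduces to a comparison (sketch; the names are local to this example):

#include <stdbool.h>
#include <stdio.h>

/* Reuse is safe only if, after subtracting the driver's own pagecnt_bias,
 * at most one reference to the page remains. */
static bool can_reuse_rx_page(int rx_buf_pgcnt, unsigned int pagecnt_bias)
{
	return (rx_buf_pgcnt - (int)pagecnt_bias) <= 1;
}

int main(void)
{
	printf("%d %d\n", can_reuse_rx_page(1, 1),	/* sole owner: reuse */
	       can_reuse_rx_page(3, 1));		/* shared: do not */
	return 0;
}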


@@ -856,6 +856,9 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
dev_kfree_skb_any(skb);
}
#define IGB_RET_PTP_DISABLED 1
#define IGB_RET_PTP_INVALID 2
/**
* igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
* @q_vector: Pointer to interrupt specific structure
@@ -864,19 +867,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
*
* This function is meant to retrieve a timestamp from the first buffer of an
* incoming frame. The value is stored in little endian format starting on
* byte 8.
* byte 8
*
* Returns: 0 if success, nonzero if failure
**/
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
struct sk_buff *skb)
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
struct sk_buff *skb)
{
__le64 *regval = (__le64 *)va;
struct igb_adapter *adapter = q_vector->adapter;
__le64 *regval = (__le64 *)va;
int adjust = 0;
if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
return IGB_RET_PTP_DISABLED;
/* The timestamp is recorded in little endian format.
* DWORD: 0 1 2 3
* Field: Reserved Reserved SYSTIML SYSTIMH
*/
/* check reserved dwords are zero, be/le doesn't matter for zero */
if (regval[0])
return IGB_RET_PTP_INVALID;
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
le64_to_cpu(regval[1]));
@@ -896,6 +909,8 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
}
skb_hwtstamps(skb)->hwtstamp =
ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
return 0;
}
/**
@@ -906,13 +921,15 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
* This function is meant to retrieve a timestamp from the internal registers
* of the adapter and store it in the skb.
**/
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb)
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
u64 regval;
int adjust = 0;
u64 regval;
if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
return;
/* If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so


@@ -545,7 +545,7 @@ void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
struct sk_buff *skb);
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);


@@ -1695,6 +1695,9 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
Autoneg);
}
/* Set pause flow control settings */
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
switch (hw->fc.requested_mode) {
case igc_fc_full:
ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
@@ -1709,9 +1712,7 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
Asym_Pause);
break;
default:
ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Asym_Pause);
break;
}
status = pm_runtime_suspended(&adapter->pdev->dev) ?


@@ -3846,10 +3846,19 @@ static void igc_reset_task(struct work_struct *work)
adapter = container_of(work, struct igc_adapter, reset_task);
rtnl_lock();
/* If we're already down or resetting, just bail */
if (test_bit(__IGC_DOWN, &adapter->state) ||
test_bit(__IGC_RESETTING, &adapter->state)) {
rtnl_unlock();
return;
}
igc_rings_dump(adapter);
igc_regs_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igc_reinit_locked(adapter);
rtnl_unlock();
}
/**


@@ -152,46 +152,54 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
}
/**
* igc_ptp_rx_pktstamp - retrieve Rx per packet timestamp
* igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer
* @q_vector: Pointer to interrupt specific structure
* @va: Pointer to address containing Rx buffer
* @skb: Buffer containing timestamp and packet
*
* This function is meant to retrieve the first timestamp from the
* first buffer of an incoming frame. The value is stored in little
* endian format starting on byte 0. There's a second timestamp
* starting on byte 8.
**/
void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
* This function retrieves the timestamp saved in the beginning of packet
* buffer. While two timestamps are available, one in timer0 reference and the
* other in timer1 reference, this function considers only the timestamp in
* timer0 reference.
*/
void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
struct sk_buff *skb)
{
struct igc_adapter *adapter = q_vector->adapter;
__le64 *regval = (__le64 *)va;
int adjust = 0;
u64 regval;
int adjust;
/* The timestamp is recorded in little endian format.
* DWORD: | 0 | 1 | 2 | 3
* Field: | Timer0 Low | Timer0 High | Timer1 Low | Timer1 High
/* Timestamps are saved in little endian at the beginning of the packet
* buffer following the layout:
*
* DWORD: | 0 | 1 | 2 | 3 |
* Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
*
* SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
* part of the timestamp.
*/
igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
le64_to_cpu(regval[0]));
regval = le32_to_cpu(va[2]);
regval |= (u64)le32_to_cpu(va[3]) << 32;
igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
/* adjust timestamp for the RX latency based on link speed */
if (adapter->hw.mac.type == igc_i225) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGC_I225_RX_LATENCY_10;
break;
case SPEED_100:
adjust = IGC_I225_RX_LATENCY_100;
break;
case SPEED_1000:
adjust = IGC_I225_RX_LATENCY_1000;
break;
case SPEED_2500:
adjust = IGC_I225_RX_LATENCY_2500;
break;
}
/* Adjust timestamp for the RX latency based on link speed */
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGC_I225_RX_LATENCY_10;
break;
case SPEED_100:
adjust = IGC_I225_RX_LATENCY_100;
break;
case SPEED_1000:
adjust = IGC_I225_RX_LATENCY_1000;
break;
case SPEED_2500:
adjust = IGC_I225_RX_LATENCY_2500;
break;
default:
adjust = 0;
netdev_warn_once(adapter->netdev, "Imprecise timestamp\n");
break;
}
skb_hwtstamps(skb)->hwtstamp =
ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
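
The rewritten igc_ptp_rx_pktstamp() above rebuilds the Timer0 value from two little-endian 32-bit words at dwords 2 and 3 of the packet buffer. A userspace sketch of that assembly, with glibc's le32toh()/htole32() standing in for le32_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t timer0_from_buffer(const uint32_t *va)
{
	/* SYSTIML (dword 2) is the low half, SYSTIMH (dword 3) the high half */
	uint64_t regval = le32toh(va[2]);

	regval |= (uint64_t)le32toh(va[3]) << 32;
	return regval;
}

int main(void)
{
	uint32_t buf[4] = { 0, 0, htole32(0x89abcdefu), htole32(0x01234567u) };

	printf("0x%016llx\n", (unsigned long long)timer0_from_buffer(buf));
	return 0;
}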


@@ -9582,8 +9582,10 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
input->sw_idx, queue);
if (!err)
ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
if (err)
goto err_out_w_lock;
ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
spin_unlock(&adapter->fdir_perfect_lock);
if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))


@@ -140,6 +140,15 @@ enum npc_kpu_lh_ltype {
NPC_LT_LH_CUSTOM1 = 0xF,
};
/* NPC port kind defines how the incoming or outgoing packets
* are processed. NPC accepts packets from up to 64 pkinds.
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
enum npc_pkind_type {
NPC_TX_DEF_PKIND = 63ULL, /* NIX-TX PKIND */
};
struct npc_kpu_profile_cam {
u8 state;
u8 state_mask;
@@ -300,6 +309,28 @@ struct nix_rx_action {
/* NPC_AF_INTFX_KEX_CFG field masks */
#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
/* NPC_PARSE_KEX_S nibble definitions for each field */
#define NPC_PARSE_NIBBLE_CHAN GENMASK_ULL(2, 0)
#define NPC_PARSE_NIBBLE_ERRLEV BIT_ULL(3)
#define NPC_PARSE_NIBBLE_ERRCODE GENMASK_ULL(5, 4)
#define NPC_PARSE_NIBBLE_L2L3_BCAST BIT_ULL(6)
#define NPC_PARSE_NIBBLE_LA_FLAGS GENMASK_ULL(8, 7)
#define NPC_PARSE_NIBBLE_LA_LTYPE BIT_ULL(9)
#define NPC_PARSE_NIBBLE_LB_FLAGS GENMASK_ULL(11, 10)
#define NPC_PARSE_NIBBLE_LB_LTYPE BIT_ULL(12)
#define NPC_PARSE_NIBBLE_LC_FLAGS GENMASK_ULL(14, 13)
#define NPC_PARSE_NIBBLE_LC_LTYPE BIT_ULL(15)
#define NPC_PARSE_NIBBLE_LD_FLAGS GENMASK_ULL(17, 16)
#define NPC_PARSE_NIBBLE_LD_LTYPE BIT_ULL(18)
#define NPC_PARSE_NIBBLE_LE_FLAGS GENMASK_ULL(20, 19)
#define NPC_PARSE_NIBBLE_LE_LTYPE BIT_ULL(21)
#define NPC_PARSE_NIBBLE_LF_FLAGS GENMASK_ULL(23, 22)
#define NPC_PARSE_NIBBLE_LF_LTYPE BIT_ULL(24)
#define NPC_PARSE_NIBBLE_LG_FLAGS GENMASK_ULL(26, 25)
#define NPC_PARSE_NIBBLE_LG_LTYPE BIT_ULL(27)
#define NPC_PARSE_NIBBLE_LH_FLAGS GENMASK_ULL(29, 28)
#define NPC_PARSE_NIBBLE_LH_LTYPE BIT_ULL(30)
/* NIX Receive Vtag Action Structure */
#define VTAG0_VALID_BIT BIT_ULL(15)
#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
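
The NPC_PARSE_NIBBLE_* fields above are built from the kernel's BIT_ULL()/GENMASK_ULL() helpers. A userspace stand-in that expands the same masks (this GENMASK_ULL definition is equivalent to the kernel's for 0 <= l <= h <= 63):

#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	printf("CHAN    = 0x%llx\n", GENMASK_ULL(2, 0));	/* bits 2:0 -> 0x7 */
	printf("ERRLEV  = 0x%llx\n", BIT_ULL(3));		/* bit 3   -> 0x8 */
	printf("ERRCODE = 0x%llx\n", GENMASK_ULL(5, 4));	/* bits 5:4 -> 0x30 */
	return 0;
}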


@@ -148,6 +148,20 @@
(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
((flags_ena) << 6) | ((key_ofs) & 0x3F))
/* Rx parse key extract nibble enable */
#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
NPC_PARSE_NIBBLE_LA_LTYPE | \
NPC_PARSE_NIBBLE_LB_LTYPE | \
NPC_PARSE_NIBBLE_LC_LTYPE | \
NPC_PARSE_NIBBLE_LD_LTYPE | \
NPC_PARSE_NIBBLE_LE_LTYPE)
/* Tx parse key extract nibble enable */
#define NPC_PARSE_NIBBLE_INTF_TX (NPC_PARSE_NIBBLE_LA_LTYPE | \
NPC_PARSE_NIBBLE_LB_LTYPE | \
NPC_PARSE_NIBBLE_LC_LTYPE | \
NPC_PARSE_NIBBLE_LD_LTYPE | \
NPC_PARSE_NIBBLE_LE_LTYPE)
enum npc_kpu_parser_state {
NPC_S_NA = 0,
NPC_S_KPU1_ETHER,
@@ -13385,9 +13399,10 @@ static const struct npc_mcam_kex npc_mkex_default = {
.name = "default",
.kpu_version = NPC_KPU_PROFILE_VER,
.keyx_cfg = {
/* nibble: LA..LE (ltype only) + Channel */
[NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | 0x49247,
[NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | ((1ULL << 19) - 1),
/* nibble: LA..LE (ltype only) + channel */
[NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX,
/* nibble: LA..LE (ltype only) */
[NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
},
.intf_lid_lt_ld = {
/* Default RX MCAM KEX profile */
@@ -13405,12 +13420,14 @@ static const struct npc_mcam_kex npc_mkex_default = {
/* Layer B: Single VLAN (CTAG) */
/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
[NPC_LT_LB_CTAG] = {
KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4),
KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4),
},
/* Layer B: Stacked VLAN (STAG|QinQ) */
[NPC_LT_LB_STAG_QINQ] = {
/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4),
/* Outer VLAN: 2 bytes, KW0[63:48] */
KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
/* Ethertype: 2 bytes, KW0[47:32] */
KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4),
},
[NPC_LT_LB_FDSA] = {
/* SWITCH PORT: 1 byte, KW0[63:48] */
@@ -13436,17 +13453,69 @@ static const struct npc_mcam_kex npc_mkex_default = {
[NPC_LID_LD] = {
/* Layer D:UDP */
[NPC_LT_LD_UDP] = {
/* SPORT: 2 bytes, KW3[15:0] */
KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
/* DPORT: 2 bytes, KW3[31:16] */
KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
/* SPORT+DPORT: 4 bytes, KW3[31:0] */
KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
},
/* Layer D:TCP */
[NPC_LT_LD_TCP] = {
/* SPORT: 2 bytes, KW3[15:0] */
KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
/* DPORT: 2 bytes, KW3[31:16] */
KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
/* SPORT+DPORT: 4 bytes, KW3[31:0] */
KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
},
},
},
/* Default TX MCAM KEX profile */
[NIX_INTF_TX] = {
[NPC_LID_LA] = {
/* Layer A: NIX_INST_HDR_S + Ethernet */
/* NIX appends 8 bytes of NIX_INST_HDR_S at the
* start of each TX packet supplied to NPC.
*/
[NPC_LT_LA_IH_NIX_ETHER] = {
/* PF_FUNC: 2 bytes, KW0[47:32] */
KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
/* DMAC: 6 bytes, KW1[63:16] */
KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa),
},
},
[NPC_LID_LB] = {
/* Layer B: Single VLAN (CTAG) */
[NPC_LT_LB_CTAG] = {
/* CTAG VLAN[2..3] KW0[63:48] */
KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
/* CTAG VLAN[2..3] KW1[15:0] */
KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x8),
},
/* Layer B: Stacked VLAN (STAG|QinQ) */
[NPC_LT_LB_STAG_QINQ] = {
/* Outer VLAN: 2 bytes, KW0[63:48] */
KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
/* Outer VLAN: 2 bytes, KW1[15:0] */
KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x8),
},
},
[NPC_LID_LC] = {
/* Layer C: IPv4 */
[NPC_LT_LC_IP] = {
/* SIP+DIP: 8 bytes, KW2[63:0] */
KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
},
/* Layer C: IPv6 */
[NPC_LT_LC_IP6] = {
/* Everything up to SADDR: 8 bytes, KW2[63:0] */
KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10),
},
},
[NPC_LID_LD] = {
/* Layer D:UDP */
[NPC_LT_LD_UDP] = {
/* SPORT+DPORT: 4 bytes, KW3[31:0] */
KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
},
/* Layer D:TCP */
[NPC_LT_LD_TCP] = {
/* SPORT+DPORT: 4 bytes, KW3[31:0] */
KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
},
},
},


@@ -2151,8 +2151,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
for (irq = 0; irq < rvu->num_vec; irq++) {
if (rvu->irq_allocated[irq])
if (rvu->irq_allocated[irq]) {
free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
rvu->irq_allocated[irq] = false;
}
}
pci_free_irq_vectors(rvu->pdev);


@@ -144,12 +144,14 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
char __user *buffer,
size_t count, loff_t *ppos)
{
int index, off = 0, flag = 0, go_back = 0, off_prev;
int index, off = 0, flag = 0, go_back = 0, len = 0;
struct rvu *rvu = filp->private_data;
int lf, pf, vf, pcifunc;
struct rvu_block block;
int bytes_not_copied;
int lf_str_size = 12;
int buf_size = 2048;
char *lfs;
char *buf;
/* don't allow partial reads */
@@ -159,12 +161,20 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOSPC;
off += scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
lfs = kzalloc(lf_str_size, GFP_KERNEL);
if (!lfs) {
kfree(buf);
return -ENOMEM;
}
off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
"pcifunc");
for (index = 0; index < BLK_COUNT; index++)
if (strlen(rvu->hw->block[index].name))
off += scnprintf(&buf[off], buf_size - 1 - off,
"%*s\t", (index - 1) * 2,
rvu->hw->block[index].name);
if (strlen(rvu->hw->block[index].name)) {
off += scnprintf(&buf[off], buf_size - 1 - off,
"%-*s", lf_str_size,
rvu->hw->block[index].name);
}
off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
@@ -173,14 +183,15 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
continue;
if (vf) {
sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
go_back = scnprintf(&buf[off],
buf_size - 1 - off,
"PF%d:VF%d\t\t", pf,
vf - 1);
"%-*s", lf_str_size, lfs);
} else {
sprintf(lfs, "PF%d", pf);
go_back = scnprintf(&buf[off],
buf_size - 1 - off,
"PF%d\t\t", pf);
"%-*s", lf_str_size, lfs);
}
off += go_back;
@@ -188,20 +199,22 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
block = rvu->hw->block[index];
if (!strlen(block.name))
continue;
off_prev = off;
len = 0;
lfs[len] = '\0';
for (lf = 0; lf < block.lf.max; lf++) {
if (block.fn_map[lf] != pcifunc)
continue;
flag = 1;
off += scnprintf(&buf[off], buf_size - 1
- off, "%3d,", lf);
len += sprintf(&lfs[len], "%d,", lf);
}
if (flag && off_prev != off)
off--;
else
go_back++;
if (flag)
len--;
lfs[len] = '\0';
off += scnprintf(&buf[off], buf_size - 1 - off,
"\t");
"%-*s", lf_str_size, lfs);
if (!strlen(lfs))
go_back += lf_str_size;
}
if (!flag)
off -= go_back;
@@ -213,6 +226,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
}
bytes_not_copied = copy_to_user(buffer, buf, off);
kfree(lfs);
kfree(buf);
if (bytes_not_copied)
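
The debugfs rework above replaces tab padding with fixed-width, left-justified columns via the "%-*s" conversion, assembling each LF list in a scratch buffer first. The formatting idea in plain C:

#include <stdio.h>

int main(void)
{
	const int lf_str_size = 12;	/* column width used by the hunk */

	/* header row, then one row per PF/VF function */
	printf("%-*s%-*s%-*s\n", lf_str_size, "pcifunc",
	       lf_str_size, "NPA", lf_str_size, "NIX");
	printf("%-*s%-*s%-*s\n", lf_str_size, "PF0:VF1",
	       lf_str_size, "0", lf_str_size, "0,1");
	return 0;
}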

(Diff truncated: not all changed files are shown.)