Merge 5.4-rc8 into android-mainline

Linux 5.4-rc8

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I1f55e5d34dc78ddb064910ce1e1b7a7b5b39aaba
Greg Kroah-Hartman
2019-11-18 08:31:11 +01:00
139 changed files with 1075 additions and 478 deletions


@@ -3261,7 +3261,6 @@ S: Maintained
 F: drivers/cpufreq/bmips-cpufreq.c

 BROADCOM BMIPS MIPS ARCHITECTURE
-M: Kevin Cernekee <cernekee@gmail.com>
 M: Florian Fainelli <f.fainelli@gmail.com>
 L: bcm-kernel-feedback-list@broadcom.com
 L: linux-mips@vger.kernel.org
@@ -8299,11 +8298,14 @@ F: drivers/hid/intel-ish-hid/
 INTEL IOMMU (VT-d)
 M: David Woodhouse <dwmw2@infradead.org>
+M: Lu Baolu <baolu.lu@linux.intel.com>
 L: iommu@lists.linux-foundation.org
-T: git git://git.infradead.org/iommu-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
 S: Supported
-F: drivers/iommu/intel-iommu.c
+F: drivers/iommu/dmar.c
+F: drivers/iommu/intel*.[ch]
 F: include/linux/intel-iommu.h
+F: include/linux/intel-svm.h

 INTEL IOP-ADMA DMA DRIVER
 R: Dan Williams <dan.j.williams@intel.com>


@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Kleptomaniac Octopus

 # *DOCUMENTATION*
@@ -921,6 +921,9 @@ ifeq ($(CONFIG_RELR),y)
 LDFLAGS_vmlinux += --pack-dyn-relocs=relr
 endif

+# make the checker run with the right architecture
+CHECKFLAGS += --arch=$(ARCH)
+
 # insure the checker run with the right endianness
 CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian)


@@ -38,10 +38,3 @@ config REPLICATE_KTEXT
 	  Say Y here to enable replicating the kernel text across multiple
 	  nodes in a NUMA cluster.  This trades memory for speed.
-
-config REPLICATE_EXHANDLERS
-	bool "Exception handler replication support"
-	depends on SGI_IP27
-	help
-	  Say Y here to enable replicating the kernel exception handlers
-	  across multiple nodes in a NUMA cluster.  This trades memory for
-	  speed.


@@ -69,23 +69,14 @@ static void per_hub_init(cnodeid_t cnode)
 	hub_rtc_init(cnode);

-#ifdef CONFIG_REPLICATE_EXHANDLERS
-	/*
-	 * If this is not a headless node initialization,
-	 * copy over the caliased exception handlers.
-	 */
-	if (get_compact_nodeid() == cnode) {
-		extern char except_vec2_generic, except_vec3_generic;
-		extern void build_tlb_refill_handler(void);
-
-		memcpy((void *)(CKSEG0 + 0x100), &except_vec2_generic, 0x80);
-		memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x80);
-		build_tlb_refill_handler();
-		memcpy((void *)(CKSEG0 + 0x100), (void *) CKSEG0, 0x80);
-		memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x100);
+	if (nasid) {
+		/* copy exception handlers from first node to current node */
+		memcpy((void *)NODE_OFFSET_TO_K0(nasid, 0),
+		       (void *)CKSEG0, 0x200);
 		__flush_cache_all();
+		/* switch to node local exception handlers */
+		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
 	}
-#endif
 }

 void per_cpu_init(void)


@@ -332,11 +332,7 @@ static void __init mlreset(void)
 	 * thinks it is a node 0 address.
 	 */
 	REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
-#ifdef CONFIG_REPLICATE_EXHANDLERS
-	REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
-#else
 	REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
-#endif

 #ifdef LATER
 	/*


@@ -65,14 +65,14 @@ $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(SPARC_REG_CFLAGS
 #
 # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
 #
-CFLAGS_REMOVE_vdso-note.o = -pg
 CFLAGS_REMOVE_vclock_gettime.o = -pg
+CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg

 $(obj)/%.so: OBJCOPYFLAGS := -S
 $(obj)/%.so: $(obj)/%.so.dbg FORCE
 	$(call if_changed,objcopy)

-CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
+CPPFLAGS_vdso32/vdso32.lds = $(CPPFLAGS_vdso.lds)
 VDSO_LDFLAGS_vdso32.lds = -m elf32_sparc -soname linux-gate.so.1

 #This makes sure the $(obj) subdirectory exists even though vdso32/


@@ -461,10 +461,8 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
 	}

 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
-	rdt_last_cmd_clear();
 	if (!rdtgrp) {
 		ret = -ENOENT;
-		rdt_last_cmd_puts("Directory was removed\n");
 		goto unlock;
 	}
@@ -2648,10 +2646,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 	int ret;

 	prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
-	rdt_last_cmd_clear();
 	if (!prdtgrp) {
 		ret = -ENODEV;
-		rdt_last_cmd_puts("Directory was removed\n");
 		goto out_unlock;
 	}


@@ -710,6 +710,8 @@ static struct chipset early_qrk[] __initdata = {
 	 */
 	{ PCI_VENDOR_ID_INTEL, 0x0f00,
 		PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+	{ PCI_VENDOR_ID_INTEL, 0x3ec4,
+		PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
 	{ PCI_VENDOR_ID_BROADCOM, 0x4331,
 		PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
 	{}


@@ -51,7 +51,12 @@
 extern bool itlb_multihit_kvm_mitigation;

 static int __read_mostly nx_huge_pages = -1;
+#ifdef CONFIG_PREEMPT_RT
+/* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
+static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
+#else
 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
+#endif

 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
 static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
@@ -6280,14 +6285,13 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 	if (new_val != old_val) {
 		struct kvm *kvm;
-		int idx;

 		mutex_lock(&kvm_lock);

 		list_for_each_entry(kvm, &vm_list, vm_list) {
-			idx = srcu_read_lock(&kvm->srcu);
+			mutex_lock(&kvm->slots_lock);
 			kvm_mmu_zap_all_fast(kvm);
-			srcu_read_unlock(&kvm->srcu, idx);
+			mutex_unlock(&kvm->slots_lock);

 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
 		}


@@ -5130,6 +5130,10 @@ static void kvm_init_msr_list(void)
 	perf_get_x86_pmu_capability(&x86_pmu);

+	num_msrs_to_save = 0;
+	num_emulated_msrs = 0;
+	num_msr_based_features = 0;
+
 	for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
 		if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
 			continue;


@@ -2713,6 +2713,28 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
 	}
 }

+static
+void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+{
+	/*
+	 * To prevent bfqq's service guarantees from being violated,
+	 * bfqq may be left busy, i.e., queued for service, even if
+	 * empty (see comments in __bfq_bfqq_expire() for
+	 * details). But, if no process will send requests to bfqq any
+	 * longer, then there is no point in keeping bfqq queued for
+	 * service. In addition, keeping bfqq queued for service, but
+	 * with no process ref any longer, may have caused bfqq to be
+	 * freed when dequeued from service. But this is assumed to
+	 * never happen.
+	 */
+	if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
+	    bfqq != bfqd->in_service_queue)
+		bfq_del_bfqq_busy(bfqd, bfqq, false);
+
+	bfq_put_queue(bfqq);
+}
+
 static void
 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
@@ -2783,8 +2805,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 	 */
 	new_bfqq->pid = -1;
 	bfqq->bic = NULL;
-	/* release process reference to bfqq */
-	bfq_put_queue(bfqq);
+	bfq_release_process_ref(bfqd, bfqq);
 }

 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
@@ -4899,7 +4920,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 	bfq_put_cooperator(bfqq);

-	bfq_put_queue(bfqq); /* release process reference */
+	bfq_release_process_ref(bfqd, bfqq);
 }

 static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
@@ -5001,8 +5022,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
 	bfqq = bic_to_bfqq(bic, false);
 	if (bfqq) {
-		/* release process reference on this queue */
-		bfq_put_queue(bfqq);
+		bfq_release_process_ref(bfqd, bfqq);
 		bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
 		bic_set_bfqq(bic, bfqq, false);
 	}
@@ -5963,7 +5983,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
 	bfq_put_cooperator(bfqq);

-	bfq_put_queue(bfqq);
+	bfq_release_process_ref(bfqq->bfqd, bfqq);
 	return NULL;
 }


@@ -753,7 +753,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 		return false;

-	if (bio->bi_vcnt > 0) {
+	if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

 		if (page_is_mergeable(bv, page, len, off, same_page)) {


@@ -1057,9 +1057,12 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
 	atomic64_set(&iocg->active_period, cur_period);

 	/* already activated or breaking leaf-only constraint? */
-	for (i = iocg->level; i > 0; i--)
-		if (!list_empty(&iocg->active_list))
+	if (!list_empty(&iocg->active_list))
+		goto succeed_unlock;
+	for (i = iocg->level - 1; i > 0; i--)
+		if (!list_empty(&iocg->ancestors[i]->active_list))
 			goto fail_unlock;
+
 	if (iocg->child_active_sum)
 		goto fail_unlock;
@@ -1101,6 +1104,7 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
 		ioc_start_period(ioc, now);
 	}

+succeed_unlock:
 	spin_unlock_irq(&ioc->lock);
 	return true;


@@ -872,3 +872,39 @@ int walk_memory_blocks(unsigned long start, unsigned long size,
 	}
 	return ret;
 }
+
+struct for_each_memory_block_cb_data {
+	walk_memory_blocks_func_t func;
+	void *arg;
+};
+
+static int for_each_memory_block_cb(struct device *dev, void *data)
+{
+	struct memory_block *mem = to_memory_block(dev);
+	struct for_each_memory_block_cb_data *cb_data = data;
+
+	return cb_data->func(mem, cb_data->arg);
+}
+
+/**
+ * for_each_memory_block - walk through all present memory blocks
+ *
+ * @arg: argument passed to func
+ * @func: callback for each memory block walked
+ *
+ * This function walks through all present memory blocks, calling func on
+ * each memory block.
+ *
+ * In case func() returns an error, walking is aborted and the error is
+ * returned.
+ */
+int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
+{
+	struct for_each_memory_block_cb_data cb_data = {
+		.func = func,
+		.arg = arg,
+	};
+
+	return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
+				for_each_memory_block_cb);
+}


@@ -2087,7 +2087,7 @@ static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
 	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
 	struct ceph_osd_data *osd_data;
 	u64 objno;
-	u8 state, new_state, current_state;
+	u8 state, new_state, uninitialized_var(current_state);
 	bool has_current_state;
 	void *p;


@@ -1000,8 +1000,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
 	cancel_work_sync(&card->event_work);

+	destroy_workqueue(card->event_wq);
 	rsxx_destroy_dev(card);
 	rsxx_dma_destroy(card);
+	destroy_workqueue(card->creg_ctrl.creg_wq);

 	spin_lock_irqsave(&card->irq_lock, flags);
 	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);


@@ -13,7 +13,6 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/err.h>
-#include <linux/freezer.h>
 #include <linux/fs.h>
 #include <linux/hw_random.h>
 #include <linux/kernel.h>
@@ -422,9 +421,7 @@ static int hwrng_fillfn(void *unused)
 {
 	long rc;

-	set_freezable();
-
-	while (!kthread_freezable_should_stop(NULL)) {
+	while (!kthread_should_stop()) {
 		struct hwrng *rng;

 		rng = get_current_rng();


@@ -327,7 +327,6 @@
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>
 #include <linux/fips.h>
-#include <linux/freezer.h>
 #include <linux/ptrace.h>
 #include <linux/workqueue.h>
 #include <linux/irq.h>
@@ -2500,8 +2499,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
 	 * We'll be woken up again once below random_write_wakeup_thresh,
 	 * or when the calling thread is about to terminate.
 	 */
-	wait_event_freezable(random_write_wait,
-			kthread_should_stop() ||
+	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
 			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
 	mix_pool_bytes(poolp, buffer, count);
 	credit_entropy_bits(poolp, entropy);


@@ -950,21 +950,7 @@ static void psp_print_fw_hdr(struct psp_context *psp,
 			     struct amdgpu_firmware_info *ucode)
 {
 	struct amdgpu_device *adev = psp->adev;
-	const struct sdma_firmware_header_v1_0 *sdma_hdr =
-		(const struct sdma_firmware_header_v1_0 *)
-		adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
-	const struct gfx_firmware_header_v1_0 *ce_hdr =
-		(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-	const struct gfx_firmware_header_v1_0 *pfp_hdr =
-		(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-	const struct gfx_firmware_header_v1_0 *me_hdr =
-		(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-	const struct gfx_firmware_header_v1_0 *mec_hdr =
-		(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-	const struct rlc_firmware_header_v2_0 *rlc_hdr =
-		(const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
-	const struct smc_firmware_header_v1_0 *smc_hdr =
-		(const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+	struct common_firmware_header *hdr;

 	switch (ucode->ucode_id) {
 	case AMDGPU_UCODE_ID_SDMA0:
@@ -975,25 +961,33 @@ static void psp_print_fw_hdr(struct psp_context *psp,
 	case AMDGPU_UCODE_ID_SDMA5:
 	case AMDGPU_UCODE_ID_SDMA6:
 	case AMDGPU_UCODE_ID_SDMA7:
-		amdgpu_ucode_print_sdma_hdr(&sdma_hdr->header);
+		hdr = (struct common_firmware_header *)
+			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
+		amdgpu_ucode_print_sdma_hdr(hdr);
 		break;
 	case AMDGPU_UCODE_ID_CP_CE:
-		amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
+		amdgpu_ucode_print_gfx_hdr(hdr);
 		break;
 	case AMDGPU_UCODE_ID_CP_PFP:
-		amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
+		amdgpu_ucode_print_gfx_hdr(hdr);
 		break;
 	case AMDGPU_UCODE_ID_CP_ME:
-		amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
+		amdgpu_ucode_print_gfx_hdr(hdr);
 		break;
 	case AMDGPU_UCODE_ID_CP_MEC1:
-		amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
+		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
+		amdgpu_ucode_print_gfx_hdr(hdr);
 		break;
 	case AMDGPU_UCODE_ID_RLC_G:
-		amdgpu_ucode_print_rlc_hdr(&rlc_hdr->header);
+		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
+		amdgpu_ucode_print_rlc_hdr(hdr);
 		break;
 	case AMDGPU_UCODE_ID_SMC:
-		amdgpu_ucode_print_smc_hdr(&smc_hdr->header);
+		hdr = (struct common_firmware_header *)adev->pm.fw->data;
+		amdgpu_ucode_print_smc_hdr(hdr);
 		break;
 	default:
 		break;


@@ -4896,6 +4896,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
 	power_domains->initializing = true;

+	/* Must happen before power domain init on VLV/CHV */
+	intel_update_rawclk(i915);
+
 	if (INTEL_GEN(i915) >= 11) {
 		icl_display_core_init(i915, resume);
 	} else if (IS_CANNONLAKE(i915)) {


@@ -199,14 +199,6 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
 	MOCS_ENTRY(15, \
 		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
 		   L3_3_WB), \
-	/* Bypass LLC - Uncached (EHL+) */ \
-	MOCS_ENTRY(16, \
-		   LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
-		   L3_1_UC), \
-	/* Bypass LLC - L3 (Read-Only) (EHL+) */ \
-	MOCS_ENTRY(17, \
-		   LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
-		   L3_3_WB), \
 	/* Self-Snoop - L3 + LLC */ \
 	MOCS_ENTRY(18, \
 		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
@@ -270,7 +262,7 @@ static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
 		   L3_1_UC),
 	/* HW Special Case (Displayable) */
 	MOCS_ENTRY(61,
-		   LE_1_UC | LE_TC_1_LLC | LE_SCF(1),
+		   LE_1_UC | LE_TC_1_LLC,
 		   L3_3_WB),
 };
}; };


@@ -498,8 +498,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 		goto out_free_gem;
 	}

-	i915_gem_object_put(obj);
-
 	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
 	if (ret < 0) {
 		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
@@ -524,6 +522,8 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 		    file_count(dmabuf->file),
 		    kref_read(&obj->base.refcount));

+	i915_gem_object_put(obj);
+
 	return dmabuf_fd;

 out_free_dmabuf:


@@ -364,9 +364,6 @@ static int i915_driver_modeset_probe(struct drm_device *dev)
 	if (ret)
 		goto cleanup_vga_client;

-	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
-	intel_update_rawclk(dev_priv);
-
 	intel_power_domains_init_hw(dev_priv, false);

 	intel_csr_ucode_init(dev_priv);


@@ -488,7 +488,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
 	WARN_ON(!tcon->quirks->has_channel_0);

-	tcon->dclk_min_div = 6;
+	tcon->dclk_min_div = 1;
 	tcon->dclk_max_div = 127;
 	sun4i_tcon0_mode_set_common(tcon, mode);


@@ -39,6 +39,7 @@ struct i2c_acpi_lookup {
 	int index;
 	u32 speed;
 	u32 min_speed;
+	u32 force_speed;
 };

 /**
@@ -285,6 +286,19 @@ i2c_acpi_match_device(const struct acpi_device_id *matches,
 	return acpi_match_device(matches, &client->dev);
 }

+static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
+	/*
+	 * These Silead touchscreen controllers only work at 400KHz, for
+	 * some reason they do not work at 100KHz. On some devices the ACPI
+	 * tables list another device at their bus as only being capable
+	 * of 100KHz, testing has shown that these other devices work fine
+	 * at 400KHz (as can be expected of any recent i2c hw) so we force
+	 * the speed of the bus to 400 KHz if a Silead device is present.
+	 */
+	{ "MSSL1680", 0 },
+	{}
+};
+
 static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
 					 void *data, void **return_value)
 {
@@ -303,6 +317,9 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
 	if (lookup->speed <= lookup->min_speed)
 		lookup->min_speed = lookup->speed;

+	if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0)
+		lookup->force_speed = 400000;
+
 	return AE_OK;
 }

@@ -340,7 +357,16 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
 		return 0;
 	}

-	return lookup.min_speed != UINT_MAX ? lookup.min_speed : 0;
+	if (lookup.force_speed) {
+		if (lookup.force_speed != lookup.min_speed)
+			dev_warn(dev, FW_BUG "DSDT uses known not-working I2C bus speed %d, forcing it to %d\n",
+				 lookup.min_speed, lookup.force_speed);
+		return lookup.force_speed;
+	} else if (lookup.min_speed != UINT_MAX) {
+		return lookup.min_speed;
+	} else {
+		return 0;
+	}
 }
 EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);


@@ -245,14 +245,14 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
 		}

 		client = of_i2c_register_device(adap, rd->dn);
-		put_device(&adap->dev);
-
 		if (IS_ERR(client)) {
 			dev_err(&adap->dev, "failed to create client for '%pOF'\n",
 				rd->dn);
+			put_device(&adap->dev);
 			of_node_clear_flag(rd->dn, OF_POPULATED);
 			return notifier_from_errno(PTR_ERR(client));
 		}
+		put_device(&adap->dev);
 		break;
 	case OF_RECONFIG_CHANGE_REMOVE:
 		/* already depopulated? */


@@ -1489,7 +1489,6 @@ static int __init hfi1_mod_init(void)
 		goto bail_dev;
 	}

-	hfi1_compute_tid_rdma_flow_wt();
 	/*
 	 * These must be called before the driver is registered with
 	 * the PCI subsystem.


@@ -319,7 +319,9 @@ int pcie_speeds(struct hfi1_devdata *dd)
 	/*
 	 * bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
 	 */
-	if (parent && dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) {
+	if (parent &&
+	    (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
+	     dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) {
 		dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
 		dd->link_gen3_capable = 0;
 	}


@@ -2209,15 +2209,15 @@ int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		if (qp->s_flags & RVT_S_WAIT_RNR)
 			goto bail_stop;
 		rdi = ib_to_rvt(qp->ibqp.device);
-		if (qp->s_rnr_retry == 0 &&
-		    !((rdi->post_parms[wqe->wr.opcode].flags &
-		      RVT_OPERATION_IGN_RNR_CNT) &&
-		      qp->s_rnr_retry_cnt == 0)) {
-			status = IB_WC_RNR_RETRY_EXC_ERR;
-			goto class_b;
+		if (!(rdi->post_parms[wqe->wr.opcode].flags &
+		      RVT_OPERATION_IGN_RNR_CNT)) {
+			if (qp->s_rnr_retry == 0) {
+				status = IB_WC_RNR_RETRY_EXC_ERR;
+				goto class_b;
+			}
+			if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
+				qp->s_rnr_retry--;
 		}
-		if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
-			qp->s_rnr_retry--;

 		/*
 		 * The last valid PSN is the previous PSN. For TID RDMA WRITE


@@ -107,8 +107,6 @@ static u32 mask_generation(u32 a)
  * C - Capcode
  */

-static u32 tid_rdma_flow_wt;
-
 static void tid_rdma_trigger_resume(struct work_struct *work);
 static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
 static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
@@ -136,6 +134,26 @@ static void update_r_next_psn_fecn(struct hfi1_packet *packet,
 				   struct tid_rdma_flow *flow,
 				   bool fecn);

+static void validate_r_tid_ack(struct hfi1_qp_priv *priv)
+{
+	if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
+		priv->r_tid_ack = priv->r_tid_tail;
+}
+
+static void tid_rdma_schedule_ack(struct rvt_qp *qp)
+{
+	struct hfi1_qp_priv *priv = qp->priv;
+
+	priv->s_flags |= RVT_S_ACK_PENDING;
+	hfi1_schedule_tid_send(qp);
+}
+
+static void tid_rdma_trigger_ack(struct rvt_qp *qp)
+{
+	validate_r_tid_ack(qp->priv);
+	tid_rdma_schedule_ack(qp);
+}
+
 static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
 {
 	return
@@ -3005,10 +3023,7 @@ nak_psn:
 		qpriv->s_nak_state = IB_NAK_PSN_ERROR;
 		/* We are NAK'ing the next expected PSN */
 		qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
-		qpriv->s_flags |= RVT_S_ACK_PENDING;
-		if (qpriv->r_tid_ack == HFI1_QP_WQE_INVALID)
-			qpriv->r_tid_ack = qpriv->r_tid_tail;
-		hfi1_schedule_tid_send(qp);
+		tid_rdma_trigger_ack(qp);
 	}
 	goto unlock;
 }
@@ -3371,18 +3386,17 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
 	return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
 }

-void hfi1_compute_tid_rdma_flow_wt(void)
+static u32 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp)
 {
 	/*
 	 * Heuristic for computing the RNR timeout when waiting on the flow
 	 * queue. Rather than a computationaly expensive exact estimate of when
 	 * a flow will be available, we assume that if a QP is at position N in
 	 * the flow queue it has to wait approximately (N + 1) * (number of
-	 * segments between two sync points), assuming PMTU of 4K. The rationale
-	 * for this is that flows are released and recycled at each sync point.
+	 * segments between two sync points). The rationale for this is that
+	 * flows are released and recycled at each sync point.
 	 */
-	tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) /
-		TID_RDMA_MAX_SEGMENT_SIZE;
+	return (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT;
 }

 static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
@@ -3505,7 +3519,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
 		if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
 			ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
 			if (ret) {
-				to_seg = tid_rdma_flow_wt *
+				to_seg = hfi1_compute_tid_rdma_flow_wt(qp) *
 					position_in_queue(qpriv,
 							  &rcd->flow_queue);
 				break;
@@ -3526,7 +3540,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
 		/*
 		 * If overtaking req->acked_tail, send an RNR NAK. Because the
 		 * QP is not queued in this case, and the issue can only be
-		 * caused due a delay in scheduling the second leg which we
+		 * caused by a delay in scheduling the second leg which we
 		 * cannot estimate, we use a rather arbitrary RNR timeout of
 		 * (MAX_FLOWS / 2) segments
 		 */
@@ -3534,8 +3548,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
 				MAX_FLOWS)) {
 			ret = -EAGAIN;
 			to_seg = MAX_FLOWS >> 1;
-			qpriv->s_flags |= RVT_S_ACK_PENDING;
-			hfi1_schedule_tid_send(qp);
+			tid_rdma_trigger_ack(qp);
 			break;
 		}
@@ -4335,8 +4348,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
 	trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
 					  req);
 	trace_hfi1_tid_write_rsp_rcv_data(qp);
-	if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
-		priv->r_tid_ack = priv->r_tid_tail;
+	validate_r_tid_ack(priv);

 	if (opcode == TID_OP(WRITE_DATA_LAST)) {
 		release_rdma_sge_mr(e);
@@ -4375,8 +4387,7 @@ void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
 	}

 done:
-	priv->s_flags |= RVT_S_ACK_PENDING;
-	hfi1_schedule_tid_send(qp);
+	tid_rdma_schedule_ack(qp);
 exit:
 	priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
 	if (fecn)
@@ -4388,10 +4399,7 @@ send_nak:
 	if (!priv->s_nak_state) {
 		priv->s_nak_state = IB_NAK_PSN_ERROR;
 		priv->s_nak_psn = flow->flow_state.r_next_psn;
-		priv->s_flags |= RVT_S_ACK_PENDING;
-		if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
-			priv->r_tid_ack = priv->r_tid_tail;
-		hfi1_schedule_tid_send(qp);
+		tid_rdma_trigger_ack(qp);
 	}
 	goto done;
 }
@@ -4939,8 +4947,7 @@ void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
 	qpriv->resync = true;
 	/* RESYNC request always gets a TID RDMA ACK. */
 	qpriv->s_nak_state = 0;
-	qpriv->s_flags |= RVT_S_ACK_PENDING;
-	hfi1_schedule_tid_send(qp);
+	tid_rdma_trigger_ack(qp);
 bail:
 	if (fecn)
 		qp->s_flags |= RVT_S_ECN;


@@ -17,6 +17,7 @@
 #define TID_RDMA_MIN_SEGMENT_SIZE	BIT(18)   /* 256 KiB (for now) */
 #define TID_RDMA_MAX_SEGMENT_SIZE	BIT(18)   /* 256 KiB (for now) */
 #define TID_RDMA_MAX_PAGES		(BIT(18) >> PAGE_SHIFT)
+#define TID_RDMA_SEGMENT_SHIFT		18

 /*
  * Bit definitions for priv->s_flags.
@@ -274,8 +275,6 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
 				  struct ib_other_headers *ohdr,
 				  u32 *bth1, u32 *bth2, u32 *len);

-void hfi1_compute_tid_rdma_flow_wt(void);
-
 void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);

 u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,


@@ -59,7 +59,7 @@ enum {
 #define HNS_ROCE_HEM_CHUNK_LEN	\
 	 ((256 - sizeof(struct list_head) - 2 * sizeof(int)) /	\
-	 (sizeof(struct scatterlist)))
+	 (sizeof(struct scatterlist) + sizeof(void *)))

 #define check_whether_bt_num_3(type, hop_num) \
 	(type < HEM_TYPE_MTT && hop_num == 2)


@@ -376,7 +376,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
 	srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
 	srq->max_gs = srq_init_attr->attr.max_sge;

-	srq_desc_size = max(16, 16 * srq->max_gs);
+	srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));

 	srq->wqe_shift = ilog2(srq_desc_size);


@@ -489,6 +489,15 @@ static void ml_ff_destroy(struct ff_device *ff)
 {
 	struct ml_device *ml = ff->private;

+	/*
+	 * Even though we stop all playing effects when tearing down
+	 * an input device (via input_device_flush() that calls into
+	 * input_ff_flush() that stops and erases all effects), we
+	 * do not actually stop the timer, and therefore we should
+	 * do it here.
+	 */
+	del_timer_sync(&ml->timer);
+
 	kfree(ml->private);
 }


@@ -177,6 +177,7 @@ static const char * const smbus_pnp_ids[] = {
 	"LEN0096", /* X280 */
 	"LEN0097", /* X280 -> ALPS trackpoint */
 	"LEN009b", /* T580 */
+	"LEN0402", /* X1 Extreme 2nd Generation */
 	"LEN200f", /* T450s */
 	"LEN2054", /* E480 */
 	"LEN2055", /* E580 */


@@ -510,7 +510,6 @@ struct f11_data {
 	struct rmi_2d_sensor_platform_data sensor_pdata;
 	unsigned long *abs_mask;
 	unsigned long *rel_mask;
-	unsigned long *result_bits;
 };

 enum f11_finger_state {
@@ -1057,7 +1056,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
 	/*
 	** init instance data, fill in values and create any sysfs files
 	*/
-	f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 3,
+	f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 2,
 			GFP_KERNEL);
 	if (!f11)
 		return -ENOMEM;
@@ -1076,8 +1075,6 @@ static int rmi_f11_initialize(struct rmi_function *fn)
 			+ sizeof(struct f11_data));
 	f11->rel_mask = (unsigned long *)((char *)f11
 			+ sizeof(struct f11_data) + mask_size);
-	f11->result_bits = (unsigned long *)((char *)f11
-			+ sizeof(struct f11_data) + mask_size * 2);

 	set_bit(fn->irq_pos, f11->abs_mask);
 	set_bit(fn->irq_pos + 1, f11->rel_mask);
@@ -1284,8 +1281,8 @@ static irqreturn_t rmi_f11_attention(int irq, void *ctx)
 			valid_bytes = f11->sensor.attn_size;
 		memcpy(f11->sensor.data_pkt, drvdata->attn_data.data,
 		       valid_bytes);
-		drvdata->attn_data.data += f11->sensor.attn_size;
-		drvdata->attn_data.size -= f11->sensor.attn_size;
+		drvdata->attn_data.data += valid_bytes;
+		drvdata->attn_data.size -= valid_bytes;
 	} else {
 		error = rmi_read_block(rmi_dev,
 				data_base_addr, f11->sensor.data_pkt,


@@ -55,6 +55,9 @@ struct f12_data {
 	const struct rmi_register_desc_item *data15;
 	u16 data15_offset;
+
+	unsigned long *abs_mask;
+	unsigned long *rel_mask;
 };

 static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
@@ -209,8 +212,8 @@ static irqreturn_t rmi_f12_attention(int irq, void *ctx)
 			valid_bytes = sensor->attn_size;
 		memcpy(sensor->data_pkt, drvdata->attn_data.data,
 		       valid_bytes);
-		drvdata->attn_data.data += sensor->attn_size;
-		drvdata->attn_data.size -= sensor->attn_size;
+		drvdata->attn_data.data += valid_bytes;
+		drvdata->attn_data.size -= valid_bytes;
 	} else {
 		retval = rmi_read_block(rmi_dev, f12->data_addr,
 					sensor->data_pkt, sensor->pkt_size);
@@ -291,9 +294,18 @@ static int rmi_f12_write_control_regs(struct rmi_function *fn)
 static int rmi_f12_config(struct rmi_function *fn)
 {
 	struct rmi_driver *drv = fn->rmi_dev->driver;
+	struct f12_data *f12 = dev_get_drvdata(&fn->dev);
+	struct rmi_2d_sensor *sensor;
 	int ret;

-	drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+	sensor = &f12->sensor;
+
+	if (!sensor->report_abs)
+		drv->clear_irq_bits(fn->rmi_dev, f12->abs_mask);
+	else
+		drv->set_irq_bits(fn->rmi_dev, f12->abs_mask);
+
+	drv->clear_irq_bits(fn->rmi_dev, f12->rel_mask);

 	ret = rmi_f12_write_control_regs(fn);
 	if (ret)
@@ -315,9 +327,12 @@ static int rmi_f12_probe(struct rmi_function *fn)
 	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
 	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
 	u16 data_offset = 0;
+	int mask_size;

 	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__);

+	mask_size = BITS_TO_LONGS(drvdata->irq_count) * sizeof(unsigned long);
+
 	ret = rmi_read(fn->rmi_dev, query_addr, &buf);
 	if (ret < 0) {
 		dev_err(&fn->dev, "Failed to read general info register: %d\n",
@@ -332,10 +347,19 @@ static int rmi_f12_probe(struct rmi_function *fn)
 		return -ENODEV;
 	}

-	f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data), GFP_KERNEL);
+	f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data) + mask_size * 2,
+			GFP_KERNEL);
 	if (!f12)
 		return -ENOMEM;

+	f12->abs_mask = (unsigned long *)((char *)f12
+			+ sizeof(struct f12_data));
+	f12->rel_mask = (unsigned long *)((char *)f12
+			+ sizeof(struct f12_data) + mask_size);
+
+	set_bit(fn->irq_pos, f12->abs_mask);
+	set_bit(fn->irq_pos + 1, f12->rel_mask);
+
 	f12->has_dribble = !!(buf & BIT(3));

 	if (fn->dev.of_node) {


@@ -359,7 +359,7 @@ static const struct vb2_ops rmi_f54_queue_ops = {
 static const struct vb2_queue rmi_f54_queue = {
 	.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 	.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ,
-	.buf_struct_size = sizeof(struct vb2_buffer),
+	.buf_struct_size = sizeof(struct vb2_v4l2_buffer),
 	.ops = &rmi_f54_queue_ops,
 	.mem_ops = &vb2_vmalloc_memops,
 	.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
@@ -601,7 +601,7 @@ static int rmi_f54_config(struct rmi_function *fn)
 {
 	struct rmi_driver *drv = fn->rmi_dev->driver;

-	drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+	drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);

 	return 0;
 }
@@ -730,6 +730,7 @@ static void rmi_f54_remove(struct rmi_function *fn)
 	video_unregister_device(&f54->vdev);
 	v4l2_device_unregister(&f54->v4l2);
+	destroy_workqueue(f54->workqueue);
 }

 struct rmi_function_handler rmi_f54_handler = {


@@ -1990,11 +1990,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd)
 	/* get sysinfo */
 	md->si = &cd->sysinfo;
-	if (!md->si) {
-		dev_err(dev, "%s: Fail get sysinfo pointer from core p=%p\n",
-			__func__, md->si);
-		goto error_get_sysinfo;
-	}

 	rc = cyttsp4_setup_input_device(cd);
 	if (rc)
@@ -2004,8 +1999,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd)

 error_init_input:
 	input_free_device(md->input);
-error_get_sysinfo:
-	input_set_drvdata(md->input, NULL);
 error_alloc_failed:
 	dev_err(dev, "%s failed.\n", __func__);
 	return rc;


@@ -358,7 +358,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
 	pm_runtime_use_autosuspend(&pdev->dev);

 	/* HS200 is broken at this moment */
-	host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
+	host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;

 	ret = sdhci_add_host(host);
 	if (ret)


@@ -617,6 +617,7 @@ err_free_chan:
 	sl->tty = NULL;
 	tty->disc_data = NULL;
 	clear_bit(SLF_INUSE, &sl->flags);
+	free_netdev(sl->dev);

 err_exit:
 	rtnl_unlock();


@@ -273,6 +273,19 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
 	int pin;
 	int err;

+	/* Reject requests with unsupported flags */
+	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+				PTP_RISING_EDGE |
+				PTP_FALLING_EDGE |
+				PTP_STRICT_FLAGS))
+		return -EOPNOTSUPP;
+
+	/* Reject requests to enable time stamping on both edges. */
+	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+		return -EOPNOTSUPP;
+
 	pin = ptp_find_pin(chip->ptp_clock, PTP_PF_EXTTS, rq->extts.index);

 	if (pin < 0)


@@ -6280,6 +6280,10 @@ static int tg3_ptp_enable(struct ptp_clock_info *ptp,
 	switch (rq->type) {
 	case PTP_CLK_REQ_PEROUT:
+		/* Reject requests with unsupported flags */
+		if (rq->perout.flags)
+			return -EOPNOTSUPP;
+
 		if (rq->perout.index != 0)
 			return -EINVAL;


@@ -763,6 +763,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
 {
 	struct net_device *dev;
 	struct ep93xx_priv *ep;
+	struct resource *mem;

 	dev = platform_get_drvdata(pdev);
 	if (dev == NULL)
@@ -778,8 +779,8 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
 		iounmap(ep->base_addr);

 	if (ep->res != NULL) {
-		release_resource(ep->res);
-		kfree(ep->res);
+		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		release_mem_region(mem->start, resource_size(mem));
 	}

 	free_netdev(dev);


@@ -2524,6 +2524,7 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
 	struct gemini_ethernet_port *port = platform_get_drvdata(pdev);

 	gemini_port_remove(port);
+	free_netdev(port->netdev);

 	return 0;
 }


@@ -2232,8 +2232,16 @@ err_set_cdan:
 err_service_reg:
 	free_channel(priv, channel);
 err_alloc_ch:
-	if (err == -EPROBE_DEFER)
+	if (err == -EPROBE_DEFER) {
+		for (i = 0; i < priv->num_channels; i++) {
+			channel = priv->channel[i];
+			nctx = &channel->nctx;
+			dpaa2_io_service_deregister(channel->dpio, nctx, dev);
+			free_channel(priv, channel);
+		}
+		priv->num_channels = 0;
 		return err;
+	}

 	if (cpumask_empty(&priv->dpio_cpumask)) {
 		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");


@@ -70,11 +70,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
 #define HNS3_NIC_LB_TEST_TX_CNT_ERR	2
 #define HNS3_NIC_LB_TEST_RX_CNT_ERR	3

-struct hns3_link_mode_mapping {
-	u32 hns3_link_mode;
-	u32 ethtool_link_mode;
-};
-
 static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);


@@ -124,7 +124,7 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
 	if (ret)
 		return ret;

-	for (i = 0; i < HNAE3_MAX_TC; i++) {
+	for (i = 0; i < hdev->tc_max; i++) {
 		switch (ets->tc_tsa[i]) {
 		case IEEE_8021QAZ_TSA_STRICT:
 			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
@@ -318,6 +318,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
 	struct net_device *netdev = h->kinfo.netdev;
 	struct hclge_dev *hdev = vport->back;
 	u8 i, j, pfc_map, *prio_tc;
+	int ret;

 	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
 	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
@@ -347,7 +348,21 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)

 	hclge_tm_pfc_info_update(hdev);

-	return hclge_pause_setup_hw(hdev, false);
+	ret = hclge_pause_setup_hw(hdev, false);
+	if (ret)
+		return ret;
+
+	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	if (ret)
+		return ret;
+
+	ret = hclge_buffer_alloc(hdev);
+	if (ret) {
+		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+		return ret;
+	}
+
+	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
 }

 /* DCBX configuration */
/* DCBX configuration */ /* DCBX configuration */


@@ -6263,11 +6263,23 @@ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;

+	/* read current config parameter */
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
-				   false);
+				   true);
 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
 	req->func_id = cpu_to_le32(func_id);
-	req->switch_param = switch_param;
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"read mac vlan switch parameter fail, ret = %d\n", ret);
+		return ret;
+	}
+
+	/* modify and write new config parameter */
+	hclge_cmd_reuse_desc(&desc, false);
+	req->switch_param = (req->switch_param & param_mask) | switch_param;
 	req->param_mask = param_mask;

 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);


@@ -521,6 +521,19 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
 	switch (rq->type) {
 	case PTP_CLK_REQ_EXTTS:
+		/* Reject requests with unsupported flags */
+		if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+					PTP_RISING_EDGE |
+					PTP_FALLING_EDGE |
+					PTP_STRICT_FLAGS))
+			return -EOPNOTSUPP;
+
+		/* Reject requests failing to enable both edges. */
+		if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+		    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+		    (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
+			return -EOPNOTSUPP;
+
 		if (on) {
 			pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
 					   rq->extts.index);
@@ -551,6 +564,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
 		return 0;

 	case PTP_CLK_REQ_PEROUT:
+		/* Reject requests with unsupported flags */
+		if (rq->perout.flags)
+			return -EOPNOTSUPP;
+
 		if (on) {
 			pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
 					   rq->perout.index);


@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 CGX driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 CGX driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *


@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 CGX driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 CGX driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *


@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *


@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *


@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *


@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *


@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *


@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *


@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Admin Function driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver
  *
  * Copyright (C) 2018 Marvell International Ltd.
  *


@@ -4010,6 +4010,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_params_unregister;

 	devlink_params_publish(devlink);
+	devlink_reload_enable(devlink);
 	pci_save_state(pdev);
 	return 0;
@@ -4121,6 +4122,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 	struct devlink *devlink = priv_to_devlink(priv);
 	int active_vfs = 0;

+	devlink_reload_disable(devlink);
+
 	if (mlx4_is_slave(dev))
 		persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;


@@ -236,6 +236,19 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
 	if (!MLX5_PPS_CAP(mdev))
 		return -EOPNOTSUPP;

+	/* Reject requests with unsupported flags */
+	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+				PTP_RISING_EDGE |
+				PTP_FALLING_EDGE |
+				PTP_STRICT_FLAGS))
+		return -EOPNOTSUPP;
+
+	/* Reject requests to enable time stamping on both edges. */
+	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+		return -EOPNOTSUPP;
+
 	if (rq->extts.index >= clock->ptp_info.n_pins)
 		return -EINVAL;
@@ -290,6 +303,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
 	if (!MLX5_PPS_CAP(mdev))
 		return -EOPNOTSUPP;

+	/* Reject requests with unsupported flags */
+	if (rq->perout.flags)
+		return -EOPNOTSUPP;
+
 	if (rq->perout.index >= clock->ptp_info.n_pins)
 		return -EINVAL;


@@ -1189,6 +1189,9 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 	if (mlxsw_driver->params_register)
 		devlink_params_publish(devlink);

+	if (!reload)
+		devlink_reload_enable(devlink);
+
 	return 0;

 err_thermal_init:
@@ -1249,6 +1252,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
 {
 	struct devlink *devlink = priv_to_devlink(mlxsw_core);

+	if (!reload)
+		devlink_reload_disable(devlink);
+
 	if (devlink_is_reload_failed(devlink)) {
 		if (!reload)
 			/* Only the parts that were not de-initialized in the


@@ -429,6 +429,10 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
 	int pulse_width = 0;
 	int perout_bit = 0;

+	/* Reject requests with unsupported flags */
+	if (perout->flags)
+		return -EOPNOTSUPP;
+
 	if (!on) {
 		lan743x_ptp_perout_off(adapter);
 		return 0;


@@ -955,6 +955,8 @@ enum RAVB_QUEUE {
 #define NUM_RX_QUEUE	2
 #define NUM_TX_QUEUE	2

+#define RX_BUF_SZ	(2048 - ETH_FCS_LEN + sizeof(__sum16))
+
 /* TX descriptors per packet */
 #define NUM_TX_DESC_GEN2	2
 #define NUM_TX_DESC_GEN3	1
@@ -1018,7 +1020,6 @@ struct ravb_private {
 	u32 dirty_rx[NUM_RX_QUEUE];	/* Producer ring indices */
 	u32 cur_tx[NUM_TX_QUEUE];
 	u32 dirty_tx[NUM_TX_QUEUE];
-	u32 rx_buf_sz;			/* Based on MTU+slack. */
 	struct napi_struct napi[NUM_RX_QUEUE];
 	struct work_struct work;
 	/* MII transceiver section. */
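For reference, the arithmetic behind the new constant: with ETH_FCS_LEN = 4 and sizeof(__sum16) = 2, RX_BUF_SZ = 2048 - 4 + 2 = 2046 bytes per RX descriptor, independent of the MTU. That fixed size is what lets ravb_change_mtu() below stop returning -EBUSY on a running interface.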

View File

@@ -230,7 +230,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 				      le32_to_cpu(desc->dptr)))
 				dma_unmap_single(ndev->dev.parent,
 						 le32_to_cpu(desc->dptr),
-						 priv->rx_buf_sz,
+						 RX_BUF_SZ,
 						 DMA_FROM_DEVICE);
 		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -293,9 +293,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
 		rx_desc = &priv->rx_ring[q][i];
-		rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
-					  priv->rx_buf_sz,
+					  RX_BUF_SZ,
 					  DMA_FROM_DEVICE);
 		/* We just set the data size to 0 for a failed mapping which
 		 * should prevent DMA from happening...
@@ -342,9 +342,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	int ring_size;
 	int i;

-	priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
-		ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
-
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
 				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
@@ -354,7 +351,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 		goto error;
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
+		skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
 		if (!skb)
 			goto error;
 		ravb_set_buffer_align(skb);
@@ -584,7 +581,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 		skb = priv->rx_skb[q][entry];
 		priv->rx_skb[q][entry] = NULL;
 		dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-				 priv->rx_buf_sz,
+				 RX_BUF_SZ,
 				 DMA_FROM_DEVICE);
 		get_ts &= (q == RAVB_NC) ?
 			RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -617,11 +614,11 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 		desc = &priv->rx_ring[q][entry];
-		desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
+		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 		if (!priv->rx_skb[q][entry]) {
 			skb = netdev_alloc_skb(ndev,
-					       priv->rx_buf_sz +
+					       RX_BUF_SZ +
 					       RAVB_ALIGN - 1);
 			if (!skb)
 				break;	/* Better luck next round. */
@@ -1801,10 +1798,15 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
 static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
 {
-	if (netif_running(ndev))
-		return -EBUSY;
+	struct ravb_private *priv = netdev_priv(ndev);

 	ndev->mtu = new_mtu;
+
+	if (netif_running(ndev)) {
+		synchronize_irq(priv->emac_irq);
+		ravb_emac_init(ndev);
+	}
+
 	netdev_update_features(ndev);

 	return 0;

View File

@@ -182,6 +182,13 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
 	struct net_device *ndev = priv->ndev;
 	unsigned long flags;

+	/* Reject requests with unsupported flags */
+	if (req->flags & ~(PTP_ENABLE_FEATURE |
+			   PTP_RISING_EDGE |
+			   PTP_FALLING_EDGE |
+			   PTP_STRICT_FLAGS))
+		return -EOPNOTSUPP;
+
 	if (req->index)
 		return -EINVAL;
@@ -211,6 +218,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
 	unsigned long flags;
 	int error = 0;

+	/* Reject requests with unsupported flags */
+	if (req->flags)
+		return -EOPNOTSUPP;
+
 	if (req->index)
 		return -EINVAL;

View File

@@ -1226,7 +1226,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
 dwmac_mux:
 	sun8i_dwmac_unset_syscon(gmac);
 dwmac_exit:
-	sun8i_dwmac_exit(pdev, plat_dat->bsp_priv);
+	stmmac_pltfr_remove(pdev);
 	return ret;
 }

View File

@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
 // Copyright (c) 2017 Synopsys, Inc. and/or its affiliates.
 // stmmac Support for 5.xx Ethernet QoS cores

View File

@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
 /*
  * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
  * stmmac XGMAC definitions.

View File

@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
 // Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 // stmmac HW Interface Callbacks

View File

@@ -140,6 +140,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
 	switch (rq->type) {
 	case PTP_CLK_REQ_PEROUT:
+		/* Reject requests with unsupported flags */
+		if (rq->perout.flags)
+			return -EOPNOTSUPP;
+
 		cfg = &priv->pps[rq->perout.index];

 		cfg->start.tv_sec = rq->perout.start.sec;

View File

@@ -708,6 +708,7 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
 		goto err_debugfs_exit;

 	devlink_params_publish(devlink);
+	devlink_reload_enable(devlink);
 	return nsim_dev;

 err_debugfs_exit:
@@ -732,6 +733,7 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
 {
 	struct devlink *devlink = priv_to_devlink(nsim_dev);

+	devlink_reload_disable(devlink);
 	nsim_bpf_dev_exit(nsim_dev);
 	nsim_dev_debugfs_exit(nsim_dev);
 	nsim_dev_traps_exit(devlink);

View File

@@ -469,6 +469,19 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
 	switch (rq->type) {
 	case PTP_CLK_REQ_EXTTS:
+		/* Reject requests with unsupported flags */
+		if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+					PTP_RISING_EDGE |
+					PTP_FALLING_EDGE |
+					PTP_STRICT_FLAGS))
+			return -EOPNOTSUPP;
+
+		/* Reject requests to enable time stamping on both edges. */
+		if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+		    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+		    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+			return -EOPNOTSUPP;
+
 		index = rq->extts.index;
 		if (index >= N_EXT_TS)
 			return -EINVAL;
@@ -491,6 +504,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
 		return 0;

 	case PTP_CLK_REQ_PEROUT:
+		/* Reject requests with unsupported flags */
+		if (rq->perout.flags)
+			return -EOPNOTSUPP;
 		if (rq->perout.index >= N_PER_OUT)
 			return -EINVAL;
 		return periodic_output(clock, rq, on, rq->perout.index);

View File

@@ -64,11 +64,12 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev)
 	if (mdiodev->dev.of_node)
 		reset = devm_reset_control_get_exclusive(&mdiodev->dev,
 							 "phy");
-	if (PTR_ERR(reset) == -ENOENT ||
-	    PTR_ERR(reset) == -ENOTSUPP)
-		reset = NULL;
-	else if (IS_ERR(reset))
-		return PTR_ERR(reset);
+	if (IS_ERR(reset)) {
+		if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOSYS)
+			reset = NULL;
+		else
+			return PTR_ERR(reset);
+	}

 	mdiodev->reset_ctrl = reset;
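The shape of this fix is the usual optional-resource pattern: only the "not present" class of errors is masked to NULL, everything else propagates. A userspace re-implementation of the control flow, with err_ptr()/is_err()/ptr_err() as stand-ins for the kernel's <linux/err.h> macros and get_optional() a hypothetical wrapper:

#include <errno.h>
#include <stdio.h>

/* Stand-ins for ERR_PTR()/IS_ERR()/PTR_ERR(): errnos encoded in the
 * top 4095 values of the pointer space. */
static inline void *err_ptr(long err) { return (void *)err; }
static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}
static inline long ptr_err(const void *p) { return (long)p; }

/* Optional resource: mask "not there" errors, propagate real failures. */
static long get_optional(void *res)
{
	if (is_err(res)) {
		if (ptr_err(res) == -ENOENT || ptr_err(res) == -ENOSYS)
			res = NULL;	/* no reset line described: fine */
		else
			return ptr_err(res);	/* a real error */
	}
	printf("resource %p usable\n", res);
	return 0;
}

int main(void)
{
	get_optional(err_ptr(-ENOENT));			/* masked to NULL */
	printf("%ld\n", get_optional(err_ptr(-EIO)));	/* propagated: -5 */
	return 0;
}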

View File

@@ -855,6 +855,7 @@ err_free_chan:
 	sl->tty = NULL;
 	tty->disc_data = NULL;
 	clear_bit(SLF_INUSE, &sl->flags);
+	free_netdev(sl->dev);

 err_exit:
 	rtnl_unlock();

View File

@@ -196,7 +196,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
 	/* Get the MAC address */
 	ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
-	if (ret < 0) {
+	if (ret < ETH_ALEN) {
 		netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
 		goto free;
 	}

View File

@@ -579,7 +579,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
 	err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
 			      USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
 			      0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
-	if (err < sizeof(max_datagram_size)) {
+	if (err != sizeof(max_datagram_size)) {
 		dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
 		goto out;
 	}

View File

@@ -1371,6 +1371,8 @@ static const struct usb_device_id products[] = {
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)},	/* Quectel EG91 */
 	{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},	/* Quectel BG96 */
 	{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)},	/* Fibocom NL678 series */
+	{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},	/* Foxconn T77W968 LTE */
+	{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)},	/* Foxconn T77W968 LTE with eSIM support*/

 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */

View File

@@ -251,27 +251,23 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
 	unsigned int mss = skb_shinfo(skb)->gso_size;
-	u16 length, iv_len, amsdu_pad;
+	u16 length, amsdu_pad;
 	u8 *start_hdr;
 	struct iwl_tso_hdr_page *hdr_page;
 	struct page **page_ptr;
 	struct tso_t tso;

-	/* if the packet is protected, then it must be CCMP or GCMP */
-	iv_len = ieee80211_has_protected(hdr->frame_control) ?
-		IEEE80211_CCMP_HDR_LEN : 0;
-
 	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
 			     &dev_cmd->hdr, start_len, 0);

 	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
 	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
-	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
+	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
 	amsdu_pad = 0;

 	/* total amount of header we may need for this A-MSDU */
 	hdr_room = DIV_ROUND_UP(total_len, mss) *
-		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
+		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));

 	/* Our device supports 9 segments at most, it will fit in 1 page */
 	hdr_page = get_page_hdr(trans, hdr_room);
@@ -282,14 +278,12 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 	start_hdr = hdr_page->pos;
 	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
 	*page_ptr = hdr_page->page;
-	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
-	hdr_page->pos += iv_len;

 	/*
-	 * Pull the ieee80211 header + IV to be able to use TSO core,
+	 * Pull the ieee80211 header to be able to use TSO core,
 	 * we will restore it for the tx_status flow.
 	 */
-	skb_pull(skb, hdr_len + iv_len);
+	skb_pull(skb, hdr_len);

 	/*
 	 * Remove the length of all the headers that we don't actually
@@ -364,8 +358,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 		}
 	}

-	/* re -add the WiFi header and IV */
-	skb_push(skb, hdr_len + iv_len);
+	/* re -add the WiFi header */
+	skb_push(skb, hdr_len);

 	return 0;

View File

@@ -220,8 +220,10 @@ static irqreturn_t nxp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
 	if (r == -EREMOTEIO) {
 		phy->hard_fault = r;
-		skb = NULL;
-	} else if (r < 0) {
+		if (info->mode == NXP_NCI_MODE_FW)
+			nxp_nci_fw_recv_frame(phy->ndev, NULL);
+	}
+	if (r < 0) {
 		nfc_err(&client->dev, "Read failed with error %d\n", r);
 		goto exit_irq_handled;
 	}

View File

@@ -149,11 +149,21 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 			err = -EFAULT;
 			break;
 		}
-		if (((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
-			req.extts.rsv[0] || req.extts.rsv[1]) &&
-			cmd == PTP_EXTTS_REQUEST2) {
+		if (cmd == PTP_EXTTS_REQUEST2) {
+			/* Tell the drivers to check the flags carefully. */
+			req.extts.flags |= PTP_STRICT_FLAGS;
+			/* Make sure no reserved bit is set. */
+			if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
+			    req.extts.rsv[0] || req.extts.rsv[1]) {
 				err = -EINVAL;
 				break;
+			}
+			/* Ensure one of the rising/falling edge bits is set. */
+			if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
+			    (req.extts.flags & PTP_EXTTS_EDGES) == 0) {
+				err = -EINVAL;
+				break;
+			}
 		} else if (cmd == PTP_EXTTS_REQUEST) {
 			req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
 			req.extts.rsv[0] = 0;
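From userspace the new semantics are opt-in: PTP_EXTTS_REQUEST2 ORs PTP_STRICT_FLAGS into the request before it reaches the driver, so drivers that cannot honour an exact edge selection must now fail instead of silently approximating. A minimal sketch, assuming a PHC at /dev/ptp0 and uapi headers new enough to define PTP_EXTTS_REQUEST2:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;
	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;
	/* The ioctl itself adds PTP_STRICT_FLAGS; a driver that cannot do
	 * rising-edge-only time stamps now returns EOPNOTSUPP. */
	if (ioctl(fd, PTP_EXTTS_REQUEST2, &req))
		perror("PTP_EXTTS_REQUEST2");
	close(fd);
	return 0;
}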

View File

@@ -803,7 +803,12 @@ success:
 			continue;

 		if (cookie->inodes[i]) {
-			afs_vnode_commit_status(&fc, AFS_FS_I(cookie->inodes[i]),
+			struct afs_vnode *iv = AFS_FS_I(cookie->inodes[i]);
+
+			if (test_bit(AFS_VNODE_UNSET, &iv->flags))
+				continue;
+
+			afs_vnode_commit_status(&fc, iv,
 						scb->cb_break, NULL, scb);
 			continue;
 		}

View File

@@ -2179,7 +2179,7 @@ SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
 #ifdef CONFIG_COMPAT

 struct __compat_aio_sigset {
-	compat_sigset_t __user	*sigmask;
+	compat_uptr_t		sigmask;
 	compat_size_t		sigsetsize;
 };
@@ -2193,7 +2193,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
 		struct old_timespec32 __user *, timeout,
 		const struct __compat_aio_sigset __user *, usig)
 {
-	struct __compat_aio_sigset ksig = { NULL, };
+	struct __compat_aio_sigset ksig = { 0, };
 	struct timespec64 t;
 	bool interrupted;
 	int ret;
@@ -2204,7 +2204,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
 	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
 		return -EFAULT;

-	ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize);
+	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
 	if (ret)
 		return ret;
@@ -2228,7 +2228,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
 		struct __kernel_timespec __user *, timeout,
 		const struct __compat_aio_sigset __user *, usig)
 {
-	struct __compat_aio_sigset ksig = { NULL, };
+	struct __compat_aio_sigset ksig = { 0, };
 	struct timespec64 t;
 	bool interrupted;
 	int ret;
@@ -2239,7 +2239,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
 	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
 		return -EFAULT;

-	ret = set_compat_user_sigmask(ksig.sigmask, ksig.sigsetsize);
+	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
 	if (ret)
 		return ret;
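For context on the bug being fixed: a native 64-bit pointer in the compat struct does not line up with the layout a 32-bit task actually writes, so the sigmask must travel as a compat_uptr_t (a u32) and be widened with compat_ptr(). A userspace sketch of the layout difference, with stand-in typedefs for the kernel's:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr_t;	/* stand-in for the kernel typedef */
typedef uint32_t compat_size_t;

struct aio_sigset_fixed {	/* matches what a 32-bit task writes */
	compat_uptr_t sigmask;
	compat_size_t sigsetsize;
};

struct aio_sigset_broken {	/* native pointer: wrong size/alignment */
	void *sigmask;
	compat_size_t sigsetsize;
};

int main(void)
{
	/* 8 vs 16 on x86-64: copy_from_user() on the broken layout reads
	 * past what the 32-bit caller supplied. */
	printf("fixed=%zu broken=%zu\n",
	       sizeof(struct aio_sigset_fixed),
	       sizeof(struct aio_sigset_broken));
	return 0;
}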

View File

@@ -459,9 +459,10 @@ static struct dentry *autofs_expire_indirect(struct super_block *sb,
 		 */
 		how &= ~AUTOFS_EXP_LEAVES;
 		found = should_expire(expired, mnt, timeout, how);
-		if (!found || found != expired)
-			/* Something has changed, continue */
+		if (found != expired) {	// something has changed, continue
+			dput(found);
 			goto next;
+		}

 		if (expired != dentry)
 			dput(dentry);

View File

@@ -9744,6 +9744,18 @@ out_fail:
 		commit_transaction = true;
 	}
 	if (commit_transaction) {
+		/*
+		 * We may have set commit_transaction when logging the new name
+		 * in the destination root, in which case we left the source
+		 * root context in the list of log contextes. So make sure we
+		 * remove it to avoid invalid memory accesses, since the context
+		 * was allocated in our stack frame.
+		 */
+		if (sync_log_root) {
+			mutex_lock(&root->log_mutex);
+			list_del_init(&ctx_root.list);
+			mutex_unlock(&root->log_mutex);
+		}
 		ret = btrfs_commit_transaction(trans);
 	} else {
 		int ret2;
@@ -9757,6 +9769,9 @@ out_notrans:
 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
 		up_read(&fs_info->subvol_sem);

+	ASSERT(list_empty(&ctx_root.list));
+	ASSERT(list_empty(&ctx_dest.list));
+
 	return ret;
 }

View File

@@ -753,6 +753,9 @@ static void ceph_aio_complete(struct inode *inode,
 	if (!atomic_dec_and_test(&aio_req->pending_reqs))
 		return;

+	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
+		inode_dio_end(inode);
+
 	ret = aio_req->error;
 	if (!ret)
 		ret = aio_req->total_len;
@@ -1091,6 +1094,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 					      CEPH_CAP_FILE_RD);

 		list_splice(&aio_req->osd_reqs, &osd_reqs);
+		inode_dio_begin(inode);
 		while (!list_empty(&osd_reqs)) {
 			req = list_first_entry(&osd_reqs,
 					       struct ceph_osd_request,
@@ -1264,14 +1268,24 @@ again:
 	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
 	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

+	if (iocb->ki_flags & IOCB_DIRECT)
+		ceph_start_io_direct(inode);
+	else
+		ceph_start_io_read(inode);
+
 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
 		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
 	else
 		want = CEPH_CAP_FILE_CACHE;
 	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
 			    &got, &pinned_page);
-	if (ret < 0)
+	if (ret < 0) {
+		if (iocb->ki_flags & IOCB_DIRECT)
+			ceph_end_io_direct(inode);
+		else
+			ceph_end_io_read(inode);
 		return ret;
+	}

 	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
 	    (iocb->ki_flags & IOCB_DIRECT) ||
@@ -1283,16 +1297,12 @@ again:
 		if (ci->i_inline_version == CEPH_INLINE_NONE) {
 			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
-				ceph_start_io_direct(inode);
 				ret = ceph_direct_read_write(iocb, to,
 							     NULL, NULL);
-				ceph_end_io_direct(inode);
 				if (ret >= 0 && ret < len)
 					retry_op = CHECK_EOF;
 			} else {
-				ceph_start_io_read(inode);
 				ret = ceph_sync_read(iocb, to, &retry_op);
-				ceph_end_io_read(inode);
 			}
 		} else {
 			retry_op = READ_INLINE;
@@ -1303,11 +1313,10 @@ again:
 		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
 		     ceph_cap_string(got));
 		ceph_add_rw_context(fi, &rw_ctx);
-		ceph_start_io_read(inode);
 		ret = generic_file_read_iter(iocb, to);
-		ceph_end_io_read(inode);
 		ceph_del_rw_context(fi, &rw_ctx);
 	}

 	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
 	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
 	if (pinned_page) {
@@ -1315,6 +1324,12 @@ again:
 		pinned_page = NULL;
 	}
 	ceph_put_cap_refs(ci, got);
+
+	if (iocb->ki_flags & IOCB_DIRECT)
+		ceph_end_io_direct(inode);
+	else
+		ceph_end_io_read(inode);
+
 	if (retry_op > HAVE_RETRIED && ret >= 0) {
 		int statret;
 		struct page *page = NULL;

View File

@@ -128,12 +128,19 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
 			      struct inode *inode)
 {
 	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
-	struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
 	struct dentry *lower_dir_dentry;
+	struct inode *lower_dir_inode;
 	int rc;

-	dget(lower_dentry);
-	lower_dir_dentry = lock_parent(lower_dentry);
-	rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
+	lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
+	lower_dir_inode = d_inode(lower_dir_dentry);
+	inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT);
+	dget(lower_dentry);	// don't even try to make the lower negative
+	if (lower_dentry->d_parent != lower_dir_dentry)
+		rc = -EINVAL;
+	else if (d_unhashed(lower_dentry))
+		rc = -EINVAL;
+	else
+		rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
 	if (rc) {
 		printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
@@ -142,10 +149,11 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
 	fsstack_copy_attr_times(dir, lower_dir_inode);
 	set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
 	inode->i_ctime = dir->i_ctime;
-	d_drop(dentry);
 out_unlock:
-	unlock_dir(lower_dir_dentry);
 	dput(lower_dentry);
+	inode_unlock(lower_dir_inode);
+	if (!rc)
+		d_drop(dentry);
 	return rc;
 }
@@ -311,9 +319,9 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
 static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
 				     struct dentry *lower_dentry)
 {
-	struct inode *inode, *lower_inode = d_inode(lower_dentry);
+	struct path *path = ecryptfs_dentry_to_lower_path(dentry->d_parent);
+	struct inode *inode, *lower_inode;
 	struct ecryptfs_dentry_info *dentry_info;
-	struct vfsmount *lower_mnt;
 	int rc = 0;

 	dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
@@ -322,16 +330,23 @@ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
 		return ERR_PTR(-ENOMEM);
 	}

-	lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
 	fsstack_copy_attr_atime(d_inode(dentry->d_parent),
-				d_inode(lower_dentry->d_parent));
+				d_inode(path->dentry));
 	BUG_ON(!d_count(lower_dentry));

 	ecryptfs_set_dentry_private(dentry, dentry_info);
-	dentry_info->lower_path.mnt = lower_mnt;
+	dentry_info->lower_path.mnt = mntget(path->mnt);
 	dentry_info->lower_path.dentry = lower_dentry;

-	if (d_really_is_negative(lower_dentry)) {
+	/*
+	 * negative dentry can go positive under us here - its parent is not
+	 * locked.  That's OK and that could happen just as we return from
+	 * ecryptfs_lookup() anyway.  Just need to be careful and fetch
+	 * ->d_inode only once - it's not stable here.
+	 */
+	lower_inode = READ_ONCE(lower_dentry->d_inode);
+
+	if (!lower_inode) {
 		/* We want to add because we couldn't find in lower */
 		d_add(dentry, NULL);
 		return NULL;
@@ -512,22 +527,30 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
 {
 	struct dentry *lower_dentry;
 	struct dentry *lower_dir_dentry;
+	struct inode *lower_dir_inode;
 	int rc;

 	lower_dentry = ecryptfs_dentry_to_lower(dentry);
-	dget(dentry);
-	lower_dir_dentry = lock_parent(lower_dentry);
-	dget(lower_dentry);
-	rc = vfs_rmdir(d_inode(lower_dir_dentry), lower_dentry);
-	dput(lower_dentry);
-	if (!rc && d_really_is_positive(dentry))
+	lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
+	lower_dir_inode = d_inode(lower_dir_dentry);
+
+	inode_lock_nested(lower_dir_inode, I_MUTEX_PARENT);
+	dget(lower_dentry);	// don't even try to make the lower negative
+	if (lower_dentry->d_parent != lower_dir_dentry)
+		rc = -EINVAL;
+	else if (d_unhashed(lower_dentry))
+		rc = -EINVAL;
+	else
+		rc = vfs_rmdir(lower_dir_inode, lower_dentry);
+	if (!rc) {
 		clear_nlink(d_inode(dentry));
-	fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry));
-	set_nlink(dir, d_inode(lower_dir_dentry)->i_nlink);
-	unlock_dir(lower_dir_dentry);
+		fsstack_copy_attr_times(dir, lower_dir_inode);
+		set_nlink(dir, lower_dir_inode->i_nlink);
+	}
+	dput(lower_dentry);
+	inode_unlock(lower_dir_inode);
 	if (!rc)
 		d_drop(dentry);
-	dput(dentry);
 	return rc;
 }
@@ -565,20 +588,22 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	struct dentry *lower_new_dentry;
 	struct dentry *lower_old_dir_dentry;
 	struct dentry *lower_new_dir_dentry;
-	struct dentry *trap = NULL;
+	struct dentry *trap;
 	struct inode *target_inode;

 	if (flags)
 		return -EINVAL;

+	lower_old_dir_dentry = ecryptfs_dentry_to_lower(old_dentry->d_parent);
+	lower_new_dir_dentry = ecryptfs_dentry_to_lower(new_dentry->d_parent);
+
 	lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
 	lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
-	dget(lower_old_dentry);
-	dget(lower_new_dentry);
-	lower_old_dir_dentry = dget_parent(lower_old_dentry);
-	lower_new_dir_dentry = dget_parent(lower_new_dentry);
+
 	target_inode = d_inode(new_dentry);

 	trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+	dget(lower_new_dentry);
 	rc = -EINVAL;
 	if (lower_old_dentry->d_parent != lower_old_dir_dentry)
 		goto out_lock;
@@ -606,11 +631,8 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	if (new_dir != old_dir)
 		fsstack_copy_attr_all(old_dir, d_inode(lower_old_dir_dentry));
 out_lock:
-	unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
-	dput(lower_new_dir_dentry);
-	dput(lower_old_dir_dentry);
 	dput(lower_new_dentry);
-	dput(lower_old_dentry);
+	unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
 	return rc;
 }

View File

@@ -519,26 +519,33 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
 		 * inode is actually connected to the parent.
 		 */
 		err = exportfs_get_name(mnt, target_dir, nbuf, result);
-		if (!err) {
-			inode_lock(target_dir->d_inode);
-			nresult = lookup_one_len(nbuf, target_dir,
-						 strlen(nbuf));
-			inode_unlock(target_dir->d_inode);
-			if (!IS_ERR(nresult)) {
-				if (nresult->d_inode) {
-					dput(result);
-					result = nresult;
-				} else
-					dput(nresult);
-			}
+		if (err) {
+			dput(target_dir);
+			goto err_result;
 		}
+		inode_lock(target_dir->d_inode);
+		nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
+		if (!IS_ERR(nresult)) {
+			if (unlikely(nresult->d_inode != result->d_inode)) {
+				dput(nresult);
+				nresult = ERR_PTR(-ESTALE);
+			}
+		}
+		inode_unlock(target_dir->d_inode);

 		/*
 		 * At this point we are done with the parent, but it's pinned
 		 * by the child dentry anyway.
 		 */
 		dput(target_dir);

+		if (IS_ERR(nresult)) {
+			err = PTR_ERR(nresult);
+			goto err_result;
+		}
+		dput(result);
+		result = nresult;
+
 		/*
 		 * And finally make sure the dentry is actually acceptable
 		 * to NFSD.

View File

@@ -326,6 +326,7 @@ struct io_kiocb {
 #define REQ_F_TIMEOUT		1024	/* timeout request */
 #define REQ_F_ISREG		2048	/* regular file */
 #define REQ_F_MUST_PUNT		4096	/* must be punted even for NONBLOCK */
+#define REQ_F_TIMEOUT_NOSEQ	8192	/* no timeout sequence */
 	u64			user_data;
 	u32			result;
 	u32			sequence;
@@ -453,10 +454,14 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
 	struct io_kiocb *req;

 	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
-	if (req && !__io_sequence_defer(ctx, req)) {
+	if (req) {
+		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
+			return NULL;
+		if (!__io_sequence_defer(ctx, req)) {
 			list_del_init(&req->list);
 			return req;
 		}
+	}

 	return NULL;
 }
@@ -1225,7 +1230,7 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
 		}
 	}

-	return 0;
+	return len;
 }
@@ -1941,18 +1946,24 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
 		return -EFAULT;

+	req->flags |= REQ_F_TIMEOUT;
+
 	/*
 	 * sqe->off holds how many events that need to occur for this
-	 * timeout event to be satisfied.
+	 * timeout event to be satisfied. If it isn't set, then this is
+	 * a pure timeout request, sequence isn't used.
 	 */
 	count = READ_ONCE(sqe->off);
-	if (!count)
-		count = 1;
+	if (!count) {
+		req->flags |= REQ_F_TIMEOUT_NOSEQ;
+		spin_lock_irq(&ctx->completion_lock);
+		entry = ctx->timeout_list.prev;
+		goto add;
+	}

 	req->sequence = ctx->cached_sq_head + count - 1;
 	/* reuse it to store the count */
 	req->submit.sequence = count;
-	req->flags |= REQ_F_TIMEOUT;

 	/*
 	 * Insertion sort, ensuring the first entry in the list is always
@@ -1964,6 +1975,9 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		unsigned nxt_sq_head;
 		long long tmp, tmp_nxt;

+		if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
+			continue;
+
 		/*
 		 * Since cached_sq_head + count - 1 can overflow, use type long
 		 * long to store it.
@@ -1990,6 +2004,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		nxt->sequence++;
 	}
 	req->sequence -= span;
+add:
 	list_add(&req->list, entry);
 	spin_unlock_irq(&ctx->completion_lock);
@@ -2283,6 +2298,7 @@ static bool io_op_needs_file(const struct io_uring_sqe *sqe)
 	switch (op) {
 	case IORING_OP_NOP:
 	case IORING_OP_POLL_REMOVE:
+	case IORING_OP_TIMEOUT:
 		return false;
 	default:
 		return true;
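The REQ_F_TIMEOUT_NOSEQ path corresponds to a timeout submitted with sqe->off == 0, which previously had its count forced to 1 and so could fire one completion early. A minimal sketch of such a pure timeout from userspace, assuming liburing is available (io_uring_prep_timeout() fills sqe->off from its count argument):

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;
	sqe = io_uring_get_sqe(&ring);
	/* count == 0: fire after 1s regardless of how many CQEs complete */
	io_uring_prep_timeout(sqe, &ts, 0, 0);
	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	/* an expired pure timeout completes with -ETIME */
	printf("timeout res = %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}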

View File

@@ -2500,8 +2500,10 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *
 		time64_to_tm(sb->s_time_max, 0, &tm);

-		pr_warn("Mounted %s file system at %s supports timestamps until %04ld (0x%llx)\n",
-			sb->s_type->name, mntpath,
+		pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n",
+			sb->s_type->name,
+			is_mounted(mnt) ? "remounted" : "mounted",
+			mntpath,
 			tm.tm_year+1900, (unsigned long long)sb->s_time_max);

 		free_page((unsigned long)buf);
@@ -2794,14 +2796,11 @@ static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
 	if (IS_ERR(mnt))
 		return PTR_ERR(mnt);

-	error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags);
-	if (error < 0) {
-		mntput(mnt);
-		return error;
-	}
 	mnt_warn_timestamp_expiry(mountpoint, mnt);

+	error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags);
+	if (error < 0)
+		mntput(mnt);
+
 	return error;
 }

View File

@@ -65,5 +65,6 @@ extern void can_rx_unregister(struct net *net, struct net_device *dev,
 			      void *data);

 extern int can_send(struct sk_buff *skb, int loop);
+void can_sock_destruct(struct sock *sk);

 #endif /* !_CAN_CORE_H */

View File

@@ -336,7 +336,8 @@ enum {
 #define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
 #define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
 #define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
-#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+				   ((u64)((pfsid >> 4) & 0xfff) << 52))
 #define QI_DEV_IOTLB_SIZE	1
 #define QI_DEV_IOTLB_MAX_INVS	32
@@ -360,7 +361,8 @@ enum {
 #define QI_DEV_EIOTLB_PASID(p)	(((u64)p) << 32)
 #define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
 #define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
-#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+				    ((u64)((pfsid >> 4) & 0xfff) << 52))
 #define QI_DEV_EIOTLB_MAX_INVS	32

 /* Page group response descriptor QW0 */
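A worked example of what the macro fix changes: the descriptor encodes PFSID bits 3:0 at descriptor bits 15:12 and PFSID bits 15:4 at bits 63:52, but the old macro took (pfsid & 0xfff), re-encoding bits 11:0 into the high field instead of bits 15:4. A check with a sample value, as plain C:

#include <stdint.h>
#include <stdio.h>

#define OLD_PFSID(pfsid) ((((uint64_t)(pfsid) & 0xf) << 12) | \
			  (((uint64_t)(pfsid) & 0xfff) << 52))
#define NEW_PFSID(pfsid) ((((uint64_t)(pfsid) & 0xf) << 12) | \
			  ((((uint64_t)(pfsid) >> 4) & 0xfff) << 52))

int main(void)
{
	uint16_t pfsid = 0x1234;

	/* old: 0x2340000000004000 (bits 11:0 = 0x234 land in 63:52)
	 * new: 0x1230000000004000 (bits 15:4 = 0x123 land in 63:52) */
	printf("old=%#018llx\nnew=%#018llx\n",
	       (unsigned long long)OLD_PFSID(pfsid),
	       (unsigned long long)NEW_PFSID(pfsid));
	return 0;
}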

View File

@@ -119,6 +119,7 @@ extern struct memory_block *find_memory_block(struct mem_section *);
 typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
 extern int walk_memory_blocks(unsigned long start, unsigned long size,
 			      void *arg, walk_memory_blocks_func_t func);
+extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
 #define CONFIG_MEM_BLOCK_SIZE	(PAGES_PER_SECTION<<PAGE_SHIFT)
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

View File

@@ -38,7 +38,8 @@ struct devlink {
 	struct device *dev;
 	possible_net_t _net;
 	struct mutex lock;
-	bool reload_failed;
+	u8 reload_failed:1,
+	   reload_enabled:1;
 	char priv[0] __aligned(NETDEV_ALIGN);
 };
@@ -774,6 +775,8 @@ struct ib_device;
 struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size);
 int devlink_register(struct devlink *devlink, struct device *dev);
 void devlink_unregister(struct devlink *devlink);
+void devlink_reload_enable(struct devlink *devlink);
+void devlink_reload_disable(struct devlink *devlink);
 void devlink_free(struct devlink *devlink);
 int devlink_port_register(struct devlink *devlink,
 			  struct devlink_port *devlink_port,
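For driver authors, the intended pairing (as the mlx4, mlxsw and netdevsim hunks above use it) is: enable reload only once the device is fully set up, and disable it before teardown so a concurrent reload cannot race with unregister. A condensed sketch of the ordering; example_probe(), example_remove(), example_devlink_ops and struct example_priv are hypothetical names, and error handling is elided:

static int example_probe(struct device *dev)
{
	struct devlink *devlink;

	devlink = devlink_alloc(&example_devlink_ops,
				sizeof(struct example_priv));
	if (!devlink)
		return -ENOMEM;
	devlink_register(devlink, dev);
	/* ... register ports, params, health reporters ... */
	devlink_reload_enable(devlink);		/* reload now safe to accept */
	return 0;
}

static void example_remove(struct devlink *devlink)
{
	devlink_reload_disable(devlink);	/* block reload before teardown */
	/* ... teardown in reverse order ... */
	devlink_unregister(devlink);
	devlink_free(devlink);
}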

View File

@@ -86,7 +86,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
 			       sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
 	),

-	TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s\n",
+	TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
 		  __entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
 		  __entry->saddr_v6, __entry->daddr_v6,
 		  show_tcp_state_name(__entry->state))

View File

@@ -421,6 +421,7 @@ enum devlink_attr {
 	DEVLINK_ATTR_RELOAD_FAILED,		/* u8 0 or 1 */

+	DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,	/* u64 */

 	/* add new attributes above here, update the policy in devlink.c */
 	__DEVLINK_ATTR_MAX,

View File

@@ -31,13 +31,16 @@
 #define PTP_ENABLE_FEATURE (1<<0)
 #define PTP_RISING_EDGE    (1<<1)
 #define PTP_FALLING_EDGE   (1<<2)
+#define PTP_STRICT_FLAGS   (1<<3)
+#define PTP_EXTTS_EDGES    (PTP_RISING_EDGE | PTP_FALLING_EDGE)

 /*
  * flag fields valid for the new PTP_EXTTS_REQUEST2 ioctl.
  */
 #define PTP_EXTTS_VALID_FLAGS	(PTP_ENABLE_FEATURE |	\
 				 PTP_RISING_EDGE |	\
-				 PTP_FALLING_EDGE)
+				 PTP_FALLING_EDGE |	\
+				 PTP_STRICT_FLAGS)

 /*
  * flag fields valid for the original PTP_EXTTS_REQUEST ioctl.

View File

@@ -351,12 +351,12 @@ static int audit_get_nd(struct audit_watch *watch, struct path *parent)
 	struct dentry *d = kern_path_locked(watch->path, parent);
 	if (IS_ERR(d))
 		return PTR_ERR(d);
-	inode_unlock(d_backing_inode(parent->dentry));
 	if (d_is_positive(d)) {
 		/* update watch filter fields */
 		watch->dev = d->d_sb->s_dev;
 		watch->ino = d_backing_inode(d)->i_ino;
 	}
+	inode_unlock(d_backing_inode(parent->dentry));
 	dput(d);
 	return 0;
 }

View File

@@ -2119,11 +2119,12 @@ int cgroup_do_get_tree(struct fs_context *fc)

 		nsdentry = kernfs_node_dentry(cgrp->kn, sb);
 		dput(fc->root);
-		fc->root = nsdentry;
 		if (IS_ERR(nsdentry)) {
-			ret = PTR_ERR(nsdentry);
 			deactivate_locked_super(sb);
+			ret = PTR_ERR(nsdentry);
+			nsdentry = NULL;
 		}
+		fc->root = nsdentry;
 	}

 	if (!ctx->kfc.new_sb_created)

View File

@@ -1036,7 +1036,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 {
 }

-void
+static inline void
 perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
 {
 }
@@ -10540,6 +10540,15 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		goto err_ns;
 	}

+	/*
+	 * Disallow uncore-cgroup events, they don't make sense as the cgroup will
+	 * be different on other CPUs in the uncore mask.
+	 */
+	if (pmu->task_ctx_nr == perf_invalid_context && cgroup_fd != -1) {
+		err = -EINVAL;
+		goto err_pmu;
+	}
+
 	if (event->attr.aux_output &&
 	    !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) {
 		err = -EOPNOTSUPP;
@@ -11331,8 +11340,11 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 	int err;

 	/*
-	 * Get the target context (task or percpu):
+	 * Grouping is not supported for kernel events, neither is 'AUX',
+	 * make sure the caller's intentions are adjusted.
 	 */
+	if (attr->aux_output)
+		return ERR_PTR(-EINVAL);

 	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
 				 overflow_handler, context, -1);
@@ -11344,6 +11356,9 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 	/* Mark owner so we could distinguish it from user events. */
 	event->owner = TASK_TOMBSTONE;

+	/*
+	 * Get the target context (task or percpu):
+	 */
 	ctx = find_get_context(event->pmu, task, event);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
@@ -11795,7 +11810,7 @@ inherit_event(struct perf_event *parent_event,
 							   GFP_KERNEL);
 		if (!child_ctx->task_ctx_data) {
 			free_event(child_event);
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 		}
 	}
@@ -11898,7 +11913,7 @@ static int inherit_group(struct perf_event *parent_event,
 		if (IS_ERR(child_ctr))
 			return PTR_ERR(child_ctr);

-		if (sub->aux_event == parent_event &&
+		if (sub->aux_event == parent_event && child_ctr &&
 		    !perf_get_aux_event(child_ctr, leader))
 			return -EINVAL;
 	}
